package proc

import (
	"bytes"
	"debug/dwarf"
	"encoding/binary"
	"errors"
	"fmt"
	"go/constant"
	"reflect"
	"strings"

	"github.com/go-delve/delve/pkg/dwarf/frame"
	"github.com/go-delve/delve/pkg/dwarf/op"
	"github.com/go-delve/delve/pkg/dwarf/reader"
	"github.com/go-delve/delve/pkg/logflags"
)

// This code is partly adapted from runtime.gentraceback in
// $GOROOT/src/runtime/traceback.go
// Stackframe represents a frame in a system stack.
//
// Each stack frame has two locations Current and Call.
//
// For the topmost stackframe Current and Call are the same location.
//
// For stackframes after the first Current is the location corresponding to
// the return address and Call is the location of the CALL instruction that
// was last executed on the frame. Note however that Call.PC is always equal
// to Current.PC, because finding the correct value for Call.PC would
// require disassembling each function in the stacktrace.
//
// For synthetic stackframes generated for inlined function calls Current.Fn
// is the function containing the inlining and Call.Fn is the inlined
// function.
type Stackframe struct {
	Current, Call Location

	// Frame registers.
	Regs op.DwarfRegisters

	// High address of the stack.
	stackHi uint64

	// Return address for this stack frame (as read from the stack frame itself).
	Ret uint64

	// Err is set if an error occurred during the stacktrace.
	Err error

	// SystemStack is true if this frame belongs to a system stack.
	SystemStack bool

	// Inlined is true if this frame is actually an inlined call.
	Inlined bool

	// hasInlines is true if this frame is a concrete function that is executing
	// inlined calls (i.e. if there is at least one inlined call frame on top of
	// this one).
	hasInlines bool

	// Bottom is true if this is the bottom of the stack.
	Bottom bool

	// lastpc is a memory address guaranteed to belong to the last instruction
	// executed in this stack frame.
	// For the topmost stack frame this will be the same as Current.PC and
	// Call.PC, for other stack frames it will usually be Current.PC-1, but
	// could be different when inlined calls are involved in the stacktrace.
	// Note that this address isn't guaranteed to belong to the start of an
	// instruction and, for this reason, should not be propagated outside of
	// pkg/proc.
	// Use this value to determine active lexical scopes for the stackframe.
	lastpc uint64

	// closurePtr is the value of .closureptr, if present. This variable is
	// used to correlate range-over-func closure bodies with their enclosing
	// function.
	closurePtr int64

	// TopmostDefer is the defer that would be at the top of the stack when a
	// panic unwind would get to this call frame, in other words it's the first
	// deferred function that will be called if the runtime unwinds past this
	// call frame.
	TopmostDefer *Defer

	// Defers is the list of functions deferred by this stack frame (so far).
	Defers []*Defer
}
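
// Distinguishing real and inlined frames (an illustrative sketch; frames is
// assumed to come from GoroutineStacktrace and nil checks are elided):
//
//	for _, f := range frames {
//		if f.Inlined {
//			// synthetic frame: Call describes the inlined call while
//			// Current still points at the concrete function
//			fmt.Printf("inlined %s (in %s)\n", f.Call.Fn.Name, f.Current.Fn.Name)
//		}
//	}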

// FrameOffset returns the address of the stack frame, absolute for system
// stack frames or as an offset from stackhi for goroutine stacks (a
// negative value).
func (frame *Stackframe) FrameOffset() int64 {
	if frame.SystemStack {
		return frame.Regs.CFA
	}
	return frame.Regs.CFA - int64(frame.stackHi)
}

// FramePointerOffset returns the value of the frame pointer, absolute for
// system stack frames or as an offset from stackhi for goroutine stacks (a
// negative value).
func (frame *Stackframe) FramePointerOffset() int64 {
	if frame.SystemStack {
		return int64(frame.Regs.BP())
	}
	return int64(frame.Regs.BP()) - int64(frame.stackHi)
}
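
// Example (sketch; frames is assumed to come from GoroutineStacktrace):
// goroutine frame offsets are negative, growing more negative toward the
// bottom of the stack.
//
//	for i, f := range frames {
//		if !f.SystemStack {
//			fmt.Printf("frame %d starts %d bytes below stackhi\n", i, -f.FrameOffset())
//		}
//	}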

// contains returns true if off is between CFA and SP
func (frame *Stackframe) contains(off int64) bool {
	p := uint64(off + int64(frame.stackHi))
	return frame.Regs.SP() < p && p <= uint64(frame.Regs.CFA)
}

// ThreadStacktrace returns the stack trace for thread.
// Note the locations in the array are return addresses not call addresses.
func ThreadStacktrace(tgt *Target, thread Thread, depth int) ([]Stackframe, error) {
	g, _ := GetG(thread)
	if g == nil {
		regs, err := thread.Registers()
		if err != nil {
			return nil, err
		}
		so := thread.BinInfo().PCToImage(regs.PC())
		dwarfRegs := *(thread.BinInfo().Arch.RegistersToDwarfRegisters(so.StaticBase, regs))
		dwarfRegs.ChangeFunc = thread.SetReg
		it := newStackIterator(tgt, thread.BinInfo(), thread.ProcessMemory(), dwarfRegs, 0, nil, 0)
		return it.stacktrace(depth)
	}
	return GoroutineStacktrace(tgt, g, depth, 0)
}
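
// Typical use (sketch; tgt and thread are assumed to be a valid *Target and
// Thread from an attached process):
//
//	frames, err := ThreadStacktrace(tgt, thread, 32)
//	if err != nil {
//		return err
//	}
//	for _, f := range frames {
//		fmt.Printf("%#x %s:%d\n", f.Call.PC, f.Call.File, f.Call.Line)
//	}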

func goroutineStackIterator(tgt *Target, g *G, opts StacktraceOptions) (*stackIterator, error) {
	bi := g.variable.bi
	if g.Thread != nil {
		regs, err := g.Thread.Registers()
		if err != nil {
			return nil, err
		}
		so := bi.PCToImage(regs.PC())
		dwarfRegs := *(bi.Arch.RegistersToDwarfRegisters(so.StaticBase, regs))
		dwarfRegs.ChangeFunc = g.Thread.SetReg
		return newStackIterator(
			tgt, bi, g.variable.mem,
			dwarfRegs,
			g.stack.hi, g, opts), nil
	}
	so := g.variable.bi.PCToImage(g.PC)
	return newStackIterator(
		tgt, bi, g.variable.mem,
		bi.Arch.addrAndStackRegsToDwarfRegisters(so.StaticBase, g.PC, g.SP, g.BP, g.LR),
		g.stack.hi, g, opts), nil
}

type StacktraceOptions uint16

const (
	// StacktraceReadDefers requests a stacktrace decorated with deferred calls
	// for each frame.
	StacktraceReadDefers StacktraceOptions = 1 << iota

	// StacktraceSimple requests a stacktrace where no stack switches will be
	// attempted.
	StacktraceSimple

	// StacktraceG requests a stacktrace starting with the register
	// values saved in the runtime.g structure.
	StacktraceG
)
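
// Options combine as a bit set, e.g. (sketch):
//
//	opts := StacktraceReadDefers | StacktraceSimple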

// GoroutineStacktrace returns the stack trace for a goroutine.
// Note the locations in the array are return addresses not call addresses.
func GoroutineStacktrace(tgt *Target, g *G, depth int, opts StacktraceOptions) ([]Stackframe, error) {
	it, err := goroutineStackIterator(tgt, g, opts)
	if err != nil {
		return nil, err
	}
	frames, err := it.stacktrace(depth)
	if err != nil {
		return nil, err
	}
	if opts&StacktraceReadDefers != 0 {
		g.readDefers(frames)
	}
	return frames, nil
}
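
// Sketch of use with defer decoration (g is assumed to be a valid *G):
//
//	frames, err := GoroutineStacktrace(tgt, g, 50, StacktraceReadDefers)
//	if err == nil && len(frames) > 0 && frames[0].TopmostDefer != nil {
//		fmt.Printf("first deferred call at %#x\n", frames[0].TopmostDefer.DeferPC)
//	}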

// NullAddrError is an error for a null address.
type NullAddrError struct{}

func (n NullAddrError) Error() string {
	return "NULL address"
}

// stackIterator holds information
// required to iterate and walk the program
// stack.
type stackIterator struct {
	pc     uint64
	top    bool
	atend  bool
	sigret bool
	frame  Stackframe
	target *Target
	bi     *BinaryInfo
	mem    MemoryReadWriter
	err    error

	stackhi     uint64
	systemstack bool

	// regs is the register set for the current frame
	regs op.DwarfRegisters

	g                  *G     // the goroutine being stacktraced, nil if we are stacktracing a goroutine-less thread
	g0_sched_sp        uint64 // value of g0.sched.sp (see comments around its use)
	g0_sched_sp_loaded bool   // g0_sched_sp was loaded from g0

	count int

	opts StacktraceOptions
}

func newStackIterator(tgt *Target, bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, g *G, opts StacktraceOptions) *stackIterator {
	systemstack := true
	if g != nil {
		systemstack = g.SystemStack
	}
	return &stackIterator{pc: regs.PC(), regs: regs, top: true, target: tgt, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, systemstack: systemstack, g: g, opts: opts}
}

// Next points the iterator to the next stack frame.
func (it *stackIterator) Next() bool {
	if it.err != nil || it.atend {
		return false
	}

	if logflags.Stack() {
		logger := logflags.StackLogger()
		w := &strings.Builder{}
		fmt.Fprintf(w, "current pc = %#x CFA = %#x FrameBase = %#x ", it.pc, it.regs.CFA, it.regs.FrameBase)
		for i := 0; i < it.regs.CurrentSize(); i++ {
			reg := it.regs.Reg(uint64(i))
			if reg == nil {
				continue
			}
			name, _, _ := it.bi.Arch.DwarfRegisterToString(i, reg)
			fmt.Fprintf(w, " %s = %#x", name, reg.Uint64Val)
		}
		logger.Debugf("%s", w.String())
	}

	callFrameRegs, ret, retaddr := it.advanceRegs()
	it.frame = it.newStackframe(ret, retaddr)

	if logflags.Stack() {
		logger := logflags.StackLogger()
		fnname := "?"
		if it.frame.Call.Fn != nil {
			fnname = it.frame.Call.Fn.Name
		}
		logger.Debugf("new frame %#x %s:%d at %s", it.frame.Call.PC, it.frame.Call.File, it.frame.Call.Line, fnname)
	}

	if it.frame.Current.Fn != nil && it.frame.Current.Fn.Name == "runtime.sigtrampgo" && it.target != nil {
		regs, err := it.readSigtrampgoContext()
		if err != nil {
			logflags.DebuggerLogger().Errorf("could not read runtime.sigtrampgo context: %v", err)
		} else {
			so := it.bi.PCToImage(regs.PC())
			regs.StaticBase = so.StaticBase
			it.pc = regs.PC()
			it.regs = *regs
			it.top = false
			if it.g != nil && it.g.ID != 0 {
				it.systemstack = !(it.regs.SP() >= it.g.stack.lo && it.regs.SP() < it.g.stack.hi)
			}
			logflags.StackLogger().Debugf("sigtramp context read")
			return true
		}
	}

	if it.opts&StacktraceSimple == 0 {
		if it.bi.Arch.switchStack(it, &callFrameRegs) {
			logflags.StackLogger().Debugf("stack switched")
			return true
		}
	}

	if it.frame.Ret <= 0 {
		it.atend = true
		return true
	}

	it.sigret = it.frame.Current.Fn != nil && it.frame.Current.Fn.Name == "runtime.sigpanic"

	it.top = false
	it.pc = it.frame.Ret
	it.regs = callFrameRegs
	return true
}

func (it *stackIterator) switchToGoroutineStack() {
	it.systemstack = false
	it.top = false
	it.pc = it.g.PC
	it.regs.Reg(it.regs.SPRegNum).Uint64Val = it.g.SP
	it.regs.AddReg(it.regs.BPRegNum, op.DwarfRegisterFromUint64(it.g.BP))
	if it.bi.Arch.Name == "arm64" || it.bi.Arch.Name == "ppc64le" || it.bi.Arch.Name == "riscv64" {
		it.regs.Reg(it.regs.LRRegNum).Uint64Val = it.g.LR
	}
}

// Frame returns the frame the iterator is pointing at.
func (it *stackIterator) Frame() Stackframe {
	it.frame.Bottom = it.atend
	return it.frame
}

// Err returns the error encountered during stack iteration.
func (it *stackIterator) Err() error {
	return it.err
}
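
// The iterator is consumed with the usual Next/Frame/Err pattern (sketch of
// internal use; it is assumed to come from newStackIterator):
//
//	for it.Next() {
//		frame := it.Frame()
//		_ = frame // inspect the physical frame
//	}
//	if err := it.Err(); err != nil {
//		// handle a partial stacktrace
//	}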

// frameBase calculates the frame base pseudo-register for DWARF for fn and
// the current frame.
func (it *stackIterator) frameBase(fn *Function) int64 {
	if fn.cu.image.Stripped() {
		return 0
	}
	dwarfTree, err := fn.cu.image.getDwarfTree(fn.offset)
	if err != nil {
		return 0
	}
	fb, _, _, _ := it.bi.Location(dwarfTree.Entry, dwarf.AttrFrameBase, it.pc, it.regs, it.mem)
	return fb
}

func (it *stackIterator) newStackframe(ret, retaddr uint64) Stackframe {
	if retaddr == 0 {
		it.err = NullAddrError{}
		return Stackframe{}
	}
	f, l, fn := it.bi.PCToLine(it.pc)
	if fn == nil {
		f = "?"
		l = -1
	} else {
		it.regs.FrameBase = it.frameBase(fn)
	}
	r := Stackframe{Current: Location{PC: it.pc, File: f, Line: l, Fn: fn}, Regs: it.regs, Ret: ret, stackHi: it.stackhi, SystemStack: it.systemstack, lastpc: it.pc}
	if r.Regs.Reg(it.regs.PCRegNum) == nil {
		r.Regs.AddReg(it.regs.PCRegNum, op.DwarfRegisterFromUint64(it.pc))
	}
	r.Call = r.Current
	if !it.top && r.Current.Fn != nil && it.pc != r.Current.Fn.Entry && !it.sigret {
		// if the return address is the entry point of the function that
		// contains it then this is some kind of fake return frame (for example
		// runtime.sigreturn) that didn't actually call the current frame,
		// attempting to get the location of the CALL instruction would just
		// obfuscate what's going on, since there is no CALL instruction.
		switch r.Current.Fn.Name {
		case "runtime.mstart", "runtime.systemstack_switch":
			// these frames are inserted by runtime.systemstack and there is no CALL
			// instruction to look for at pc - 1
		default:
			r.lastpc = it.pc - 1
			r.Call.File, r.Call.Line = r.Current.Fn.cu.lineInfo.PCToLine(r.Current.Fn.Entry, it.pc-1)
		}
	}
	if fn != nil && !fn.cu.image.Stripped() && !r.SystemStack && it.g != nil {
		dwarfTree, _ := fn.cu.image.getDwarfTree(fn.offset)
		if dwarfTree != nil {
			c := readLocalPtrVar(dwarfTree, goClosurePtr, it.target, it.bi, fn.cu.image, r.Regs, it.mem)
			if c != 0 {
				if c >= it.g.stack.lo && c < it.g.stack.hi {
					r.closurePtr = int64(c) - int64(it.g.stack.hi)
				} else {
					r.closurePtr = int64(c)
				}
			}
		}
	}
	return r
}

func (it *stackIterator) stacktrace(depth int) ([]Stackframe, error) {
	if depth < 0 {
		return nil, errors.New("negative maximum stack depth")
	}
	frames := make([]Stackframe, 0, depth+1)
	it.stacktraceFunc(func(frame Stackframe) bool {
		frames = append(frames, frame)
		return len(frames) < depth+1
	})
	if err := it.Err(); err != nil {
		if len(frames) == 0 {
			return nil, err
		}
		frames = append(frames, Stackframe{Err: err})
	}
	return frames, nil
}

func (it *stackIterator) stacktraceFunc(callback func(Stackframe) bool) {
	if it.opts&StacktraceG != 0 && it.g != nil {
		it.switchToGoroutineStack()
		it.top = true
	}
	for it.Next() {
		if !it.appendInlineCalls(callback, it.Frame()) {
			break
		}
	}
}

func (it *stackIterator) appendInlineCalls(callback func(Stackframe) bool, frame Stackframe) bool {
	if frame.Call.Fn == nil {
		it.count++
		return callback(frame)
	}
	if frame.Call.Fn.cu.lineInfo == nil {
		it.count++
		return callback(frame)
	}

	callpc := frame.Call.PC
	if it.count > 0 {
		callpc--
	}

	dwarfTree, err := frame.Call.Fn.cu.image.getDwarfTree(frame.Call.Fn.offset)
	if err != nil {
		it.count++
		return callback(frame)
	}

	for _, entry := range reader.InlineStack(dwarfTree, callpc) {
		frame.hasInlines = true
		fnname, okname := entry.Val(dwarf.AttrName).(string)
		fileidx, okfileidx := entry.Val(dwarf.AttrCallFile).(int64)
		line, okline := entry.Val(dwarf.AttrCallLine).(int64)
		if !okname || !okfileidx || !okline {
			break
		}
		var e *dwarf.Entry
		filepath, fileErr := frame.Current.Fn.cu.filePath(int(fileidx), e)
		if fileErr != nil {
			break
		}

		inlfn := &Function{Name: fnname, Entry: frame.Call.Fn.Entry, End: frame.Call.Fn.End, offset: entry.Offset, cu: frame.Call.Fn.cu}
		it.count++
		callback(Stackframe{
			Current: frame.Current,
			Call: Location{
				frame.Call.PC,
				frame.Call.File,
				frame.Call.Line,
				inlfn,
			},
			Regs:        frame.Regs,
			stackHi:     frame.stackHi,
			Ret:         frame.Ret,
			Err:         frame.Err,
			SystemStack: frame.SystemStack,
			Inlined:     true,
			lastpc:      frame.lastpc,
			closurePtr:  frame.closurePtr,
		})

		frame.Call.File = filepath
		frame.Call.Line = int(line)
	}

	it.count++
	return callback(frame)
}
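
// Illustration (a sketch): if main.g and main.h are inlined into main.f and
// the PC is inside the body of main.h, appendInlineCalls invokes the callback
// once per inline stack entry and once for the physical frame, so the single
// concrete frame expands into a sequence like:
//
//	main.h (Inlined == true)
//	main.g (Inlined == true)
//	main.f (the concrete frame, hasInlines == true, reported last)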

// advanceRegs calculates the DwarfRegisters for the next stack frame
// (corresponding to it.pc).
//
// The computation uses the registers for the current stack frame (it.regs) and
// the corresponding Frame Descriptor Entry (FDE) retrieved from the DWARF info.
//
// The new set of registers is returned. it.regs is not updated, except for
// it.regs.CFA; the caller has to eventually switch it.regs when the iterator
// advances to the next frame.
func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uint64, retaddr uint64) {
	fde, err := it.bi.frameEntries.FDEForPC(it.pc)
	var framectx *frame.FrameContext
	if _, nofde := err.(*frame.ErrNoFDEForPC); nofde {
		framectx = it.bi.Arch.fixFrameUnwindContext(nil, it.pc, it.bi)
	} else {
		framectx = it.bi.Arch.fixFrameUnwindContext(fde.EstablishFrame(it.pc), it.pc, it.bi)
	}

	logger := logflags.StackLogger()
	logger.Debugf("advanceRegs at %#x", it.pc)

	cfareg, err := it.executeFrameRegRule(0, framectx.CFA, 0)
	if cfareg == nil {
		it.err = fmt.Errorf("CFA becomes undefined at PC %#x: %v", it.pc, err)
		return op.DwarfRegisters{}, 0, 0
	}
	if logflags.Stack() {
		logger.Debugf("\tCFA rule %s -> %#x", ruleString(&framectx.CFA, it.bi.Arch.RegnumToString), cfareg.Uint64Val)
	}
	it.regs.CFA = int64(cfareg.Uint64Val)

	callimage := it.bi.PCToImage(it.pc)

	callFrameRegs = op.DwarfRegisters{
		StaticBase: callimage.StaticBase,
		ByteOrder:  it.regs.ByteOrder,
		PCRegNum:   it.regs.PCRegNum,
		SPRegNum:   it.regs.SPRegNum,
		BPRegNum:   it.regs.BPRegNum,
		LRRegNum:   it.regs.LRRegNum,
	}

	// According to the standard the compiler should be responsible for emitting
	// rules for the RSP register so that it can then be used to calculate CFA,
	// however neither Go nor GCC do this.
	// In the following line we copy GDB's behaviour by assuming this is
	// implicit.
	// See also the comment in dwarf2_frame_default_init in
	// $GDB_SOURCE/dwarf2/frame.c
	callFrameRegs.AddReg(callFrameRegs.SPRegNum, cfareg)

	for i, regRule := range framectx.Regs {
		if logflags.Stack() {
			logger.Debugf("\t%s rule %s ", it.bi.Arch.RegnumToString(i), ruleString(&regRule, it.bi.Arch.RegnumToString))
		}
		reg, err := it.executeFrameRegRule(i, regRule, it.regs.CFA)
		if reg != nil {
			logger.Debugf("\t\t-> %#x", reg.Uint64Val)
		} else {
			logger.Debugf("\t\t-> nothing (%v)", err)
		}
		callFrameRegs.AddReg(i, reg)
		if i == framectx.RetAddrReg {
			if reg == nil {
				if err == nil {
					//lint:ignore ST1005 backwards compatibility
					err = fmt.Errorf("Undefined return address at %#x", it.pc)
				}
				it.err = err
			} else {
				ret = reg.Uint64Val
				// On systems which use a link register to store the return address of a function,
				// certain leaf functions may not have correct DWARF information present in the
				// .debug_frame FDE when unwinding after a fatal signal. This is due to the fact
				// that runtime.sigpanic inserts a frame to make it look like the function which
				// triggered the signal called runtime.sigpanic directly, making the value of the
				// link register unreliable. Instead, treat it as a non-leaf function and read the
				// return address from the stack. For more details, see:
				// https://github.com/golang/go/issues/63862#issuecomment-1802672629.
				if it.frame.Call.Fn != nil && it.frame.Call.Fn.Name == "runtime.sigpanic" && it.bi.Arch.usesLR {
					buf := make([]byte, 8)
					_, err := it.mem.ReadMemory(buf, uint64(it.regs.CFA))
					if err != nil {
						it.err = err
					}
					binary.Read(bytes.NewReader(buf), binary.LittleEndian, &ret)
				}
			}
			retaddr = uint64(it.regs.CFA + regRule.Offset)
		}
	}

	if it.bi.Arch.Name == "arm64" || it.bi.Arch.Name == "ppc64le" || it.bi.Arch.Name == "riscv64" {
		if ret == 0 && it.regs.Reg(it.regs.LRRegNum) != nil {
			ret = it.regs.Reg(it.regs.LRRegNum).Uint64Val
		}
	}

	return callFrameRegs, ret, retaddr
}
func (it *stackIterator) executeFrameRegRule(regnum uint64, rule frame.DWRule, cfa int64) (*op.DwarfRegister, error) {
	switch rule.Rule {
	default:
		fallthrough
	case frame.RuleUndefined:
		return nil, nil
	case frame.RuleSameVal:
		if it.regs.Reg(regnum) == nil {
			return nil, nil
		}
		reg := *it.regs.Reg(regnum)
		return &reg, nil
	case frame.RuleOffset:
		return it.readRegisterAt(regnum, uint64(cfa+rule.Offset))
	case frame.RuleValOffset:
		return op.DwarfRegisterFromUint64(uint64(cfa + rule.Offset)), nil
	case frame.RuleRegister:
		return it.regs.Reg(rule.Reg), nil
	case frame.RuleExpression:
		v, _, err := op.ExecuteStackProgram(it.regs, rule.Expression, it.bi.Arch.PtrSize(), it.mem.ReadMemory)
		if err != nil {
			return nil, err
		}
		return it.readRegisterAt(regnum, uint64(v))
	case frame.RuleValExpression:
		v, _, err := op.ExecuteStackProgram(it.regs, rule.Expression, it.bi.Arch.PtrSize(), it.mem.ReadMemory)
		if err != nil {
			return nil, err
		}
		return op.DwarfRegisterFromUint64(uint64(v)), nil
	case frame.RuleArchitectural:
		return nil, errors.New("architectural frame rules are unsupported")
	case frame.RuleCFA:
		if it.regs.Reg(rule.Reg) == nil {
			return nil, nil
		}
		return op.DwarfRegisterFromUint64(uint64(int64(it.regs.Uint64Val(rule.Reg)) + rule.Offset)), nil
	case frame.RuleFramePointer:
		curReg := it.regs.Reg(rule.Reg)
		if curReg == nil {
			return nil, nil
		}
		if curReg.Uint64Val <= uint64(cfa) {
			return it.readRegisterAt(regnum, curReg.Uint64Val)
		}
		newReg := *curReg
		return &newReg, nil
	}
}
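
// readRegisterAt reads the saved value of register regnum from memory at
// address addr.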
func (it *stackIterator) readRegisterAt(regnum uint64, addr uint64) (*op.DwarfRegister, error) {
	buf := make([]byte, it.bi.Arch.regSize(regnum))
	_, err := it.mem.ReadMemory(buf, addr)
	if err != nil {
		return nil, err
	}
	return op.DwarfRegisterFromBytes(buf), nil
}
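
// loadG0SchedSP loads, at most once per stack iterator, the saved stack
// pointer of the g0 goroutine of the M associated with the current
// goroutine (read through g.m.g0). Judging by the field name, this is used
// elsewhere in the iterator to detect switches to the system stack.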
func (it *stackIterator) loadG0SchedSP() {
	if it.g0_sched_sp_loaded {
		return
	}
	it.g0_sched_sp_loaded = true
	if it.g != nil {
		mvar, _ := it.g.variable.structMember("m")
		if mvar != nil {
			g0var, _ := mvar.structMember("g0")
			if g0var != nil {
				g0, _ := g0var.parseG()
				if g0 != nil {
					it.g0_sched_sp = g0.SP
				}
			}
		}
	}
}

// Defer represents one deferred call.
type Defer struct {
	DwrapPC uint64 // PC of the deferred function or, in Go 1.17+, a wrapper to it
	DeferPC uint64 // PC address of instruction that added this defer
	SP      uint64 // Value of SP register when this function was deferred (this field gets adjusted when the stack is moved to match the new stack space)
	link    *Defer // Next deferred function

	argSz int64 // Always 0 in Go >= 1.17

	rangefunc []*Defer // See the comment to function runtime.deferrangefunc in $GOROOT/src/runtime/panic.go (this is the equivalent of the rangefunc variable and head fields, combined)

	variable   *Variable
	Unreadable error
}

// readDefers decorates the frames with the function deferred at each stack frame.
func (g *G) readDefers(frames []Stackframe) {
	curdefer := g.Defer()
	i := 0

	// Scan the frames and the curdefer linked list simultaneously, assigning
	// defers to their associated frames.
	for {
		if curdefer == nil || i >= len(frames) {
			return
		}
		if curdefer.Unreadable != nil {
			// Current defer is unreadable, stick it into the first available frame
			// (so that it can be reported to the user) and exit.
			frames[i].Defers = append(frames[i].Defers, curdefer)
			return
		}
		if frames[i].Err != nil {
			return
		}

		if frames[i].TopmostDefer == nil {
			frames[i].TopmostDefer = curdefer.topdefer()
		}

		if frames[i].SystemStack || frames[i].Inlined || curdefer.SP >= uint64(frames[i].Regs.CFA) {
			// frames[i].Regs.CFA is the value that SP had before the function of
			// frames[i] was called.
			// This means that when curdefer.SP == frames[i].Regs.CFA then curdefer
			// was added by the previous frame.
			//
			// curdefer.SP < frames[i].Regs.CFA means curdefer was added by a
			// function further down the stack.
			//
			// SystemStack frames live on a different physical stack and can't be
			// compared with deferred frames.
			i++
		} else {
			if len(curdefer.rangefunc) > 0 {
				frames[i].Defers = append(frames[i].Defers, curdefer.rangefunc...)
			} else {
				frames[i].Defers = append(frames[i].Defers, curdefer)
			}
			curdefer = curdefer.Next()
		}
	}
}
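
// maxRangeFuncDefers is an arbitrary limit on how many range-over-func
// defer records are read from a single linked list, guarding against a
// corrupted list (see the loop in (*Defer).load).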
const maxRangeFuncDefers = 10
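
// load reads the contents of the runtime._defer struct pointed to by
// d.variable; if canrecur is true it also loads the linked list of
// range-over-func defer records attached to this defer.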
func (d *Defer) load(canrecur bool) {
	v := d.variable // +rtype _defer
	v.loadValue(LoadConfig{false, 1, 0, 0, -1, 0})
	if v.Unreadable != nil {
		d.Unreadable = v.Unreadable
		return
	}

	fnvar := v.fieldVariable("fn")
	if fnvar.Kind == reflect.Func {
		// In Go 1.18, fn is a func().
		d.DwrapPC = fnvar.Base
	} else if val := fnvar.maybeDereference(); val.Addr != 0 {
		// In Go <1.18, fn is a *funcval.
		fnvar = fnvar.loadFieldNamed("fn")
		if fnvar.Unreadable == nil {
			d.DwrapPC, _ = constant.Uint64Val(fnvar.Value)
		}
	}

	d.DeferPC, _ = constant.Uint64Val(v.fieldVariable("pc").Value) // +rtype uintptr
	d.SP, _ = constant.Uint64Val(v.fieldVariable("sp").Value)      // +rtype uintptr

	sizVar := v.fieldVariable("siz") // +rtype -opt int32
	if sizVar != nil {
		// In Go <1.18, siz stores the number of bytes of
		// defer arguments following the defer record. In Go
		// 1.18, the defer record doesn't store arguments, so
		// we leave this 0.
		d.argSz, _ = constant.Int64Val(sizVar.Value)
	}

	linkvar := v.fieldVariable("link").maybeDereference() // +rtype *_defer
	if linkvar.Addr != 0 {
		d.link = &Defer{variable: linkvar}
	}

	if canrecur {
		h := v
		for _, fieldname := range []string{"head", "u", "value"} {
			if h == nil {
				return
			}
			h = h.loadFieldNamed(fieldname)
		}
		if h != nil {
			h := h.newVariable("", h.Addr, pointerTo(linkvar.DwarfType, h.bi.Arch), h.mem).maybeDereference()
			if h.Addr != 0 {
				hd := &Defer{variable: h}
				for {
					hd.load(false)
					d.rangefunc = append(d.rangefunc, hd)
					if hd.link == nil {
						break
					}
					if len(d.rangefunc) > maxRangeFuncDefers {
						// We don't have a way to know for sure that we haven't gone
						// completely off-road while loading this list, so limit it
						// to an arbitrary maximum size.
						break
					}
					hd = hd.link
				}
			}
		}
	}
}

// errSPDecreased is used when (*Defer).Next detects a corrupted linked
// list, specifically when after following a link pointer the value of SP
// decreases rather than increasing or staying the same (the defer list is a
// LIFO list, nodes further down the list have been added by function calls
// further down the call stack and therefore the SP should always increase).
var errSPDecreased = errors.New("corrupted defer list: SP decreased")

// Next returns the next defer in the linked list.
func (d *Defer) Next() *Defer {
	if d.link == nil {
		return nil
	}
	d.link.load(true)
	if d.link.SP < d.SP {
		d.link.Unreadable = errSPDecreased
	}
	return d.link
}
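
// topdefer returns the defer that will execute first: either d itself or,
// for range-over-func defers, the first entry of its rangefunc list.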
func (d *Defer) topdefer() *Defer {
	if len(d.rangefunc) > 0 {
		return d.rangefunc[0]
	}
	return d
}

// EvalScope returns an EvalScope relative to the argument frame of this deferred call.
// The argument frame of a deferred call is stored in memory immediately
// after the deferred header.
func (d *Defer) EvalScope(t *Target, thread Thread) (*EvalScope, error) {
	scope, err := GoroutineScope(t, thread)
	if err != nil {
		return nil, fmt.Errorf("could not get scope: %v", err)
	}

	bi := thread.BinInfo()
	scope.PC = d.DwrapPC
	scope.File, scope.Line, scope.Fn = bi.PCToLine(d.DwrapPC)

	if scope.Fn == nil {
		return nil, fmt.Errorf("could not find function at %#x", d.DwrapPC)
	}

	// The arguments are stored immediately after the defer header struct,
	// i.e. at addr+sizeof(_defer).
	if !bi.Arch.usesLR {
		// On architectures that don't have a link register the CFA is the
		// address of the first argument, so that's what we use for its value.
		// For SP we use CFA minus the size of one pointer, because that is the
		// space occupied by pushing the return address on the stack during the
		// CALL.
		scope.Regs.CFA = (int64(d.variable.Addr) + d.variable.RealType.Common().ByteSize)
		scope.Regs.Reg(scope.Regs.SPRegNum).Uint64Val = uint64(scope.Regs.CFA - int64(bi.Arch.PtrSize()))
	} else {
		// On architectures that have a link register CFA and SP have the same
		// value, but the address of the first argument is at CFA+ptrSize, so
		// we set CFA to the start of the argument frame minus one pointer
		// size.
		scope.Regs.CFA = int64(d.variable.Addr) + d.variable.RealType.Common().ByteSize - int64(bi.Arch.PtrSize())
		scope.Regs.Reg(scope.Regs.SPRegNum).Uint64Val = uint64(scope.Regs.CFA)
	}

	rdr := scope.Fn.cu.image.dwarfReader
	rdr.Seek(scope.Fn.offset)
	e, err := rdr.Next()
	if err != nil {
		return nil, fmt.Errorf("could not read DWARF function entry: %v", err)
	}
	scope.Regs.FrameBase, _, _, _ = bi.Location(e, dwarf.AttrFrameBase, scope.PC, scope.Regs, scope.Mem)
	scope.Mem = cacheMemory(scope.Mem, uint64(scope.Regs.CFA), int(d.argSz))

	return scope, nil
}

// DeferredFunc returns the deferred function; on Go 1.17 and later it
// unwraps any defer wrapper.
func (d *Defer) DeferredFunc(p *Target) (file string, line int, fn *Function) {
	bi := p.BinInfo()
	fn = bi.PCToFunc(d.DwrapPC)
	fn = p.dwrapUnwrap(fn)
	if fn == nil {
		return "", 0, nil
	}
	file, line = bi.EntryLineForFunc(fn)
	return file, line, fn
}
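
// ruleString returns a compact human-readable description of a DWARF Call
// Frame Information rule, meant for debug output.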
func ruleString(rule *frame.DWRule, regnumToString func(uint64) string) string {
	switch rule.Rule {
	case frame.RuleUndefined:
		return "undefined"
	case frame.RuleSameVal:
		return "sameval"
	case frame.RuleOffset:
		return fmt.Sprintf("[cfa+%d]", rule.Offset)
	case frame.RuleValOffset:
		return fmt.Sprintf("cfa+%d", rule.Offset)
	case frame.RuleRegister:
		return fmt.Sprintf("R(%d)", rule.Reg)
	case frame.RuleExpression:
		w := &strings.Builder{}
		op.PrettyPrint(w, rule.Expression, regnumToString)
		return fmt.Sprintf("[expr(%s)]", w.String())
	case frame.RuleValExpression:
		w := &strings.Builder{}
		op.PrettyPrint(w, rule.Expression, regnumToString)
		return fmt.Sprintf("expr(%s)", w.String())
	case frame.RuleArchitectural:
		return "architectural"
	case frame.RuleCFA:
		return fmt.Sprintf("R(%d)+%d", rule.Reg, rule.Offset)
	case frame.RuleFramePointer:
		return fmt.Sprintf("[R(%d)] framepointer", rule.Reg)
	default:
		return fmt.Sprintf("unknown_rule(%d)", rule.Rule)
	}
}

// rangeFuncStackTrace, if the topmost frame of the stack is the body of a
// range-over-func statement, returns a slice containing the stack of range
// bodies on the stack, interleaved with their return frames, followed by
// the frame of the function containing them and finally that function's
// caller.
//
// For example, given:
//
//	func f() {
//		for range iterator1 {
//			for range iterator2 {
//				fmt.Println() // <- YOU ARE HERE
//			}
//		}
//	}
//
// It will return the following frames:
//
//	0. f-range2()
//	1. function that called f-range2
//	2. f-range1()
//	3. function that called f-range1
//	4. f()
//	5. function that called f()
//
// If the topmost frame of the stack is *not* the body closure of a
// range-over-func statement then nothing is returned.
func rangeFuncStackTrace(tgt *Target, g *G) ([]Stackframe, error) {
	if g == nil {
		return nil, nil
	}
	it, err := goroutineStackIterator(tgt, g, StacktraceSimple)
	if err != nil {
		return nil, err
	}
	frames := []Stackframe{}
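
	// Stages of the search (a summary inferred from the state transitions
	// below):
	//
	//	startStage:     looking at the topmost frame, which must be the body
	//	                closure of a range-over-func statement
	//	normalStage:    collecting range bodies and their return frames until
	//	                the frame of the range parent (the function literally
	//	                containing the range statements) is found
	//	lastFrameStage: adding the frame that called the range parent
	//	doneStage:      the search is finished, successfully or not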
	const (
		startStage = iota
		normalStage
		lastFrameStage
		doneStage
	)
	stage := startStage
	addRetFrame := false

	var rangeParent *Function
	nonMonotonicSP := false
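	// closurePtr is the closure pointer of the last range-over-func body
	// frame added to frames; a negative value means the closure is stack
	// allocated (see closurePtrOk below).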
	var closurePtr int64

	optimized := func(fn *Function) bool {
		return fn.cu.optimized&optimizedOptimized != 0
	}
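
	// appendFrame adds fr to the returned frames, records its closure
	// pointer (if any) and requests that the next frame seen, which is its
	// return frame, is also added.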
	appendFrame := func(fr Stackframe) {
		frames = append(frames, fr)
		if fr.closurePtr != 0 {
			closurePtr = fr.closurePtr
		}
		addRetFrame = true
	}
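
	// closurePtrOk reports whether fr could contain the closure currently
	// recorded in closurePtr, either directly on the frame (stack allocated
	// closures) or through one of the range body closure variables in scope
	// in the frame.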
	closurePtrOk := func(fr *Stackframe) bool {
		if fr.SystemStack {
			return false
		}
		if closurePtr == 0 && optimized(fr.Call.Fn) {
			return true
		}
		if closurePtr < 0 {
			// closure is stack allocated, check that it is on this frame
			return fr.contains(closurePtr)
		}
		// otherwise closurePtr is a heap allocated variable, so we need to check
		// all closure body variables in scope in this frame
		scope := FrameToScope(tgt, it.mem, it.g, 0, *fr)
		yields, _ := scope.simpleLocals(localsNoDeclLineCheck|localsOnlyRangeBodyClosures, "")
		for _, yield := range yields {
			if yield.Kind != reflect.Func {
				continue
			}
			addr := yield.funcvalAddr()
			if int64(addr) == closurePtr {
				return true
			}
		}
		return false
	}

	it.stacktraceFunc(func(fr Stackframe) bool {
		if len(frames) > 0 {
			prev := &frames[len(frames)-1]
			if fr.Regs.SP() < prev.Regs.SP() {
				nonMonotonicSP = true
				return false
			}
		}

		if addRetFrame {
			addRetFrame = false
			frames = append(frames, fr)
		}

		if fr.Call.Fn == nil {
			if stage == startStage {
				frames = nil
				addRetFrame = false
				stage = doneStage
				return false
			}
			return true
		}

		switch stage {
		case startStage:
			appendFrame(fr)
			rangeParent = fr.Call.Fn.extra(tgt.BinInfo()).rangeParent
			stage = normalStage
			stop := false
			if rangeParent == nil {
				stop = true
			}
			if !optimized(fr.Call.Fn) && closurePtr == 0 {
				stop = true
			}
			if stop {
				frames = nil
				addRetFrame = false
				stage = doneStage
				return false
			}
		case normalStage:
			if fr.Call.Fn.offset == rangeParent.offset && closurePtrOk(&fr) {
				frames = append(frames, fr)
				stage = lastFrameStage
			} else if fr.Call.Fn.extra(tgt.BinInfo()).rangeParent == rangeParent && closurePtrOk(&fr) {
				appendFrame(fr)
				if !optimized(fr.Call.Fn) && closurePtr == 0 {
					frames = nil
					addRetFrame = false
					stage = doneStage
					return false
				}
			}
		case lastFrameStage:
			frames = append(frames, fr)
			stage = doneStage
			return false
		case doneStage:
			return false
		}
		return true
	})
	if it.Err() != nil {
		return nil, it.Err()
	}
	if nonMonotonicSP {
		return nil, errors.New("corrupted stack (SP not monotonically decreasing)")
	}
	if stage != doneStage {
		return nil, errors.New("could not find range-over-func closure parent on the stack")
	}
	if len(frames)%2 != 0 {
		return nil, errors.New("incomplete range-over-func stacktrace")
	}
	g.readDefers(frames)
	return frames, nil
}