2018-05-04 17:31:45 +00:00
package proc
import (
"debug/dwarf"
"encoding/binary"
"errors"
"fmt"
"go/ast"
"go/constant"
"reflect"
2024-10-04 17:44:57 +00:00
"slices"
2018-05-04 17:31:45 +00:00
"sort"
2019-06-17 16:51:29 +00:00
"strconv"
2019-06-30 17:44:30 +00:00
"strings"
2018-05-04 17:31:45 +00:00
2019-01-04 18:39:25 +00:00
"github.com/go-delve/delve/pkg/dwarf/godwarf"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/reader"
2021-04-28 17:00:26 +00:00
"github.com/go-delve/delve/pkg/dwarf/regnum"
2019-06-17 16:51:29 +00:00
"github.com/go-delve/delve/pkg/goversion"
2019-01-04 18:39:25 +00:00
"github.com/go-delve/delve/pkg/logflags"
2023-10-17 18:21:59 +00:00
"github.com/go-delve/delve/pkg/proc/evalop"
2018-05-04 17:31:45 +00:00
)
// This file implements the function call injection introduced in go1.11.
//
// The protocol is described in $GOROOT/src/runtime/asm_amd64.s in the
2023-10-17 18:21:59 +00:00
// comments for function runtime·debugCallVn.
2018-05-04 17:31:45 +00:00
//
2023-10-23 19:29:04 +00:00
// The main entry point is EvalExpressionWithCalls which will set up an
// evalStack object to evaluate the provided expression.
// This object can either finish immediately, if no function calls were
// needed, or return with callInjectionContinue set. When this happens
// EvalExpressionWithCalls will call Continue and return.
2018-05-04 17:31:45 +00:00
//
2023-10-23 19:29:04 +00:00
// The Continue loop will call evalStack.resume when it hits a breakpoint in
// the call injection protocol.
2019-05-09 15:29:58 +00:00
//
// The work of setting up the function call and executing the protocol is
2023-10-17 18:21:59 +00:00
// done by:
//
// - evalop.CallInjectionStart
// - evalop.CallInjectionSetTarget
// - evalCallInjectionCopyArg
// - evalCallInjectionComplete
2024-10-04 17:44:57 +00:00
//
// When the target has runtime.debugPinner then evalCallInjectionPinPointer
// must be also called in a loop until it returns false.
2018-05-04 17:31:45 +00:00
const (
	// Prefixes of the names of the functions implementing the debug call
	// protocol in the target (e.g. "runtime.debugCallV2").
	debugCallFunctionNamePrefix1 = "debugCall"
	debugCallFunctionNamePrefix2 = "runtime.debugCall"

	// maxDebugCallVersion is the highest version of the debug call protocol
	// supported by this code.
	maxDebugCallVersion = 2

	// maxArgFrameSize is the upper limit on the size (in bytes) of the
	// argument frame of an injected call.
	maxArgFrameSize = 65535
)
// Errors returned when a function call injection cannot be performed.
var (
	errFuncCallUnsupported        = errors.New("function calls not supported by this version of Go")
	errFuncCallUnsupportedBackend = errors.New("backend does not support function calls")
	errFuncCallInProgress         = errors.New("cannot call function while another function call is already in progress")
	errNoGoroutine                = errors.New("no goroutine selected")
	errGoroutineNotRunning        = errors.New("selected goroutine not running")
	errNotEnoughStack             = errors.New("not enough stack space")
	errTooManyArguments           = errors.New("too many arguments")
	errNotEnoughArguments         = errors.New("not enough arguments")
	errNotAGoFunction             = errors.New("not a Go function")
	errFuncCallNotAllowedStrAlloc = errors.New("literal string can not be allocated because function calls are not allowed without using 'call'")
)
// functionCallState holds the state of a single injected function call
// while the call injection protocol executes.
type functionCallState struct {
	// savedRegs contains the saved registers
	savedRegs Registers
	// err contains a saved error
	err error
	// expr is the expression being evaluated
	expr *ast.CallExpr
	// fn is the function that is being called
	fn *Function
	// receiver is the receiver argument for the function
	receiver *Variable
	// closureAddr is the address of the closure being called
	closureAddr uint64
	// formalArgs are the formal arguments of fn
	formalArgs []funcCallArg
	// argFrameSize contains the size of the arguments
	argFrameSize int64
	// retvars contains the return variables after the function call terminates without panic'ing
	retvars []*Variable
	// panicvar is a variable used to store the value of the panic, if the
	// called function panics.
	panicvar *Variable
	// undoInjection is set after evalop.CallInjectionSetTarget runs and cleared by evalCallInjectionComplete
	// it contains information on how to undo a function call injection without running it
	undoInjection *undoInjection

	// hasDebugPinner is true if the target has runtime.debugPinner
	hasDebugPinner bool
	// doPinning is true if this call injection should pin the results
	doPinning bool
	// addrsToPin addresses from return variables that should be pinned
	addrsToPin []uint64

	// protocolReg is the register used to communicate with the debug call
	// protocol (see debugCallProtocolReg).
	protocolReg uint64
	// debugCallName is the name of the debug call protocol entry point
	// found in the target binary.
	debugCallName string
}
// undoInjection contains the information needed to undo a function call
// injection without running it: the PC and LR values to restore.
type undoInjection struct {
	oldpc, oldlr uint64
	// doComplete2 — presumably indicates that a second completion step of the
	// debug call protocol must still be executed; it is set and read outside
	// this file view (TODO confirm).
	doComplete2 bool
}
2019-05-09 15:29:58 +00:00
// callContext carries the context shared by all the calls injected while
// evaluating a single expression.
type callContext struct {
	// grp is the target group being debugged; p is the selected target
	// (see EvalExpressionWithCalls, which sets p to grp.Selected).
	grp *TargetGroup
	p   *Target

	// checkEscape is true if the escape check should be performed.
	// See service/api.DebuggerCommand.UnsafeCall in service/api/types.go.
	checkEscape bool

	// retLoadCfg is the load configuration used to load return values
	retLoadCfg LoadConfig

	// injectionThread is the thread to use for nested call injections if the
	// original injection goroutine isn't running (because we are in Go 1.15)
	injectionThread Thread

	// stacks is a slice of known goroutine stacks used to check for
	// inappropriate escapes
	stacks []stack
}
2020-01-21 20:41:24 +00:00
// callInjection associates a goroutine with an in-progress call injection
// (see Target.fncallForG).
type callInjection struct {
	// evalStack is the evaluation stack driving the injected call; it is set
	// to nil by finishEvalExpressionWithCalls when the injection completes.
	evalStack *evalStack
	// startThreadID is the ID of the thread on which the injection started.
	startThreadID int
	// endCallInjection releases the backend resources acquired by
	// proc.StartCallInjection.
	endCallInjection func()
}
2024-10-04 17:44:57 +00:00
// debugPinCount is reset by EvalExpressionWithCalls and incremented by the
// result-pinning code; it is only read by tests.
//
//lint:ignore U1000 this variable is only used by tests
var debugPinCount int
2019-05-09 15:29:58 +00:00
// EvalExpressionWithCalls is like EvalExpression but allows function calls in 'expr'.
// Because this can only be done in the current goroutine, unlike
// EvalExpression, EvalExpressionWithCalls is not a method of EvalScope.
2022-07-14 21:14:45 +00:00
func EvalExpressionWithCalls ( grp * TargetGroup , g * G , expr string , retLoadCfg LoadConfig , checkEscape bool ) error {
2024-10-04 17:44:57 +00:00
debugPinCount = 0
2022-07-14 21:14:45 +00:00
t := grp . Selected
2020-01-21 20:41:24 +00:00
bi := t . BinInfo ( )
if ! t . SupportsFunctionCalls ( ) {
2018-08-31 18:08:18 +00:00
return errFuncCallUnsupportedBackend
2018-05-04 17:31:45 +00:00
}
2023-02-14 17:38:15 +00:00
producer := bi . Producer ( )
if producer == "" || ! goversion . ProducerAfterOrEqual ( bi . Producer ( ) , 1 , 12 ) {
return errFuncCallUnsupported
}
2018-05-04 17:31:45 +00:00
2019-06-30 17:44:30 +00:00
// check that the target goroutine is running
2018-05-04 17:31:45 +00:00
if g == nil {
2018-08-31 18:08:18 +00:00
return errNoGoroutine
2018-05-04 17:31:45 +00:00
}
if g . Status != Grunning || g . Thread == nil {
2018-08-31 18:08:18 +00:00
return errGoroutineNotRunning
2018-05-04 17:31:45 +00:00
}
2023-10-23 19:29:04 +00:00
if callinj := t . fncallForG [ g . ID ] ; callinj != nil && callinj . evalStack != nil {
2019-06-30 17:44:30 +00:00
return errFuncCallInProgress
}
2021-07-08 15:47:53 +00:00
dbgcallfn , _ := debugCallFunction ( bi )
2019-06-30 17:44:30 +00:00
if dbgcallfn == nil {
return errFuncCallUnsupported
}
2021-07-02 16:37:55 +00:00
scope , err := GoroutineScope ( t , g . Thread )
2019-05-09 15:29:58 +00:00
if err != nil {
return err
}
scope . callCtx = & callContext {
2023-10-23 19:29:04 +00:00
grp : grp ,
p : t ,
checkEscape : checkEscape ,
retLoadCfg : retLoadCfg ,
2019-05-09 15:29:58 +00:00
}
2023-10-23 19:29:04 +00:00
scope . loadCfg = & retLoadCfg
2019-05-09 15:29:58 +00:00
2021-10-14 18:06:14 +00:00
endCallInjection , err := t . proc . StartCallInjection ( )
if err != nil {
return err
}
2024-10-04 17:44:57 +00:00
ops , err := evalop . Compile ( scopeToEvalLookup { scope } , expr , scope . evalopFlags ( ) | evalop . CanSet )
2023-10-23 19:29:04 +00:00
if err != nil {
return err
2019-06-30 17:44:30 +00:00
}
2019-05-09 15:29:58 +00:00
2023-10-23 19:29:04 +00:00
stack := & evalStack { }
t . fncallForG [ g . ID ] = & callInjection {
evalStack : stack ,
startThreadID : 0 ,
endCallInjection : endCallInjection ,
}
2019-05-09 15:29:58 +00:00
2023-10-23 19:29:04 +00:00
stack . eval ( scope , ops )
2024-10-04 17:44:57 +00:00
if stack . callInjectionContinue && stack . err == nil {
2022-07-14 21:14:45 +00:00
return grp . Continue ( )
2019-05-09 15:29:58 +00:00
}
2023-10-23 19:29:04 +00:00
return finishEvalExpressionWithCalls ( t , g , stack )
2019-05-09 15:29:58 +00:00
}
2023-10-23 19:29:04 +00:00
// finishEvalExpressionWithCalls runs when the evaluation of an expression
// involving call injections completes: it stashes the return values in the
// goroutine's thread, removes the callInjection entries for this injection
// and releases the backend call injection state.
func finishEvalExpressionWithCalls(t *Target, g *G, stack *evalStack) error {
	fncallLog("stashing return values for %d in thread=%d", g.ID, g.Thread.ThreadID())
	g.Thread.Common().CallReturn = true
	ret, err := stack.result(&stack.scope.callCtx.retLoadCfg)
	if err != nil {
		// NOTE(review): this inspects stack.err rather than the err returned
		// by stack.result — presumably they are the same error; confirm.
		if fpe, ispanic := stack.err.(fncallPanicErr); ispanic {
			// The called function panicked: report the panic value as the
			// only "return value" and clear the error.
			err = nil
			g.Thread.Common().returnValues = []*Variable{fpe.panicVar}
		}
	} else if ret == nil {
		g.Thread.Common().returnValues = nil
	} else if ret.Addr == 0 && ret.DwarfType == nil && ret.Kind == reflect.Invalid {
		// this is a variable returned by a function call with multiple return values
		r := make([]*Variable, len(ret.Children))
		for i := range ret.Children {
			r[i] = &ret.Children[i]
		}
		g.Thread.Common().returnValues = r
	} else {
		g.Thread.Common().returnValues = []*Variable{ret}
	}

	// Remove every fncallForG entry that points at this injection (more than
	// one goroutine ID may be mapped to it).
	callinj := t.fncallForG[g.ID]
	for goid := range t.fncallForG {
		if t.fncallForG[goid] == callinj {
			delete(t.fncallForG, goid)
		}
	}
	callinj.evalStack = nil
	callinj.endCallInjection()
	return err
}
2023-10-17 18:21:59 +00:00
// evalCallInjectionStart implements evalop.CallInjectionStart: it verifies
// that a call injection is possible on the current goroutine/thread, saves
// the current registers and redirects execution to the debug call protocol
// entry point (runtime.debugCallVn) in the target.
func (scope *EvalScope) evalCallInjectionStart(op *evalop.CallInjectionStart, stack *evalStack) {
	if scope.callCtx == nil {
		stack.err = evalop.ErrFuncCallNotAllowed
		return
	}
	thread := scope.g.Thread
	stacklo := scope.g.stack.lo
	if thread == nil {
		// We are doing a nested function call and using Go 1.15, the original
		// injection goroutine was suspended and now we are using a different
		// goroutine, evaluation still happened on the original goroutine but we
		// need to use a different thread to do the nested call injection.
		thread = scope.callCtx.injectionThread
		g2, err := GetG(thread)
		if err != nil {
			stack.err = err
			return
		}
		stacklo = g2.stack.lo
	}
	if thread == nil {
		stack.err = errGoroutineNotRunning
		return
	}

	p := scope.callCtx.p
	bi := scope.BinInfo
	if !p.SupportsFunctionCalls() {
		stack.err = errFuncCallUnsupportedBackend
		return
	}

	dbgcallfn, dbgcallversion := debugCallFunction(bi)
	if dbgcallfn == nil {
		stack.err = errFuncCallUnsupported
		return
	}

	// check that there is enough free space on the stack
	// (bi.Arch.debugCallMinStackSize bytes).
	regs, err := thread.Registers()
	if err != nil {
		stack.err = err
		return
	}
	regs, err = regs.Copy()
	if err != nil {
		stack.err = err
		return
	}
	if regs.SP()-bi.Arch.debugCallMinStackSize <= stacklo {
		stack.err = errNotEnoughStack
		return
	}

	// The protocol communicates through a specific register; check that the
	// backend can read it.
	protocolReg, ok := debugCallProtocolReg(bi.Arch.Name, dbgcallversion)
	if !ok {
		stack.err = errFuncCallUnsupported
		return
	}
	if bi.Arch.RegistersToDwarfRegisters(0, regs).Reg(protocolReg) == nil {
		stack.err = errFuncCallUnsupportedBackend
		return
	}

	// Save the memory backing the variables already on the evaluation stack:
	// the injected call could overwrite it.
	for _, v := range stack.stack {
		if v.Flags&(VariableFakeAddress|VariableCPURegister|variableSaved) != 0 || v.Unreadable != nil || v.DwarfType == nil || v.RealType == nil || v.Addr == 0 {
			continue
		}
		saveVariable(v)
	}

	fncall := functionCallState{
		expr:           op.Node,
		savedRegs:      regs,
		protocolReg:    protocolReg,
		debugCallName:  dbgcallfn.Name,
		hasDebugPinner: scope.BinInfo.hasDebugPinner(),
	}

	if op.HasFunc {
		err = funcCallEvalFuncExpr(scope, stack, &fncall)
		if err != nil {
			stack.err = err
			return
		}
	}

	switch bi.Arch.Name {
	case "amd64":
		if err := callOP(bi, thread, regs, dbgcallfn.Entry); err != nil {
			stack.err = err
			return
		}
		// write the desired argument frame size at SP-(2*pointer_size) (the extra pointer is the saved PC)
		if err := writePointer(bi, scope.Mem, regs.SP()-3*uint64(bi.Arch.PtrSize()), uint64(fncall.argFrameSize)); err != nil {
			stack.err = err
			return
		}
	case "arm64", "ppc64le":
		// debugCallV2 on arm64 needs a special call sequence, callOP can not be used
		sp := regs.SP()
		var spOffset uint64
		if bi.Arch.Name == "arm64" {
			spOffset = 2 * uint64(bi.Arch.PtrSize())
		} else {
			spOffset = 4 * uint64(bi.Arch.PtrSize())
		}
		sp -= spOffset
		if err := setSP(thread, sp); err != nil {
			stack.err = err
			return
		}
		// Save LR on the stack and put the return address in LR.
		if err := writePointer(bi, scope.Mem, sp, regs.LR()); err != nil {
			stack.err = err
			return
		}
		if err := setLR(thread, regs.PC()); err != nil {
			stack.err = err
			return
		}
		// Write the argument frame size below the saved LR.
		if err := writePointer(bi, scope.Mem, sp-spOffset, uint64(fncall.argFrameSize)); err != nil {
			stack.err = err
			return
		}
		// Re-read and save the registers as modified above.
		regs, err = thread.Registers()
		if err != nil {
			stack.err = err
			return
		}
		regs, err = regs.Copy()
		if err != nil {
			stack.err = err
			return
		}
		fncall.savedRegs = regs
		err = setPC(thread, dbgcallfn.Entry)
		if err != nil {
			stack.err = err
			return
		}
	}

	fncallLog("function call initiated %v frame size %d goroutine %d (thread %d)", fncall.fn, fncall.argFrameSize, scope.g.ID, thread.ThreadID())

	thread.Breakpoint().Clear() // since we moved address in PC the thread is no longer stopped at a breakpoint, leaving the breakpoint set will confuse Continue
	p.fncallForG[scope.g.ID].startThreadID = thread.ThreadID()

	stack.fncallPush(&fncall)
	stack.push(newConstant(constant.MakeBool(!fncall.hasDebugPinner && (fncall.fn == nil || fncall.receiver != nil || fncall.closureAddr != 0)), scope.Mem))
	stack.callInjectionContinue = true
}
2019-05-09 15:29:58 +00:00
2024-10-04 17:44:57 +00:00
// saveVariable caches the memory backing v so that its current value
// survives writes performed by the injected call, and marks it as saved.
func saveVariable(v *Variable) {
	v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size()))
	v.Flags |= variableSaved
	cm, ok := v.mem.(*memCache)
	if ok {
		v.Unreadable = cm.load()
	}
}
2023-10-17 18:21:59 +00:00
func funcCallFinish ( scope * EvalScope , stack * evalStack ) {
fncall := stack . fncallPop ( )
2019-05-09 15:29:58 +00:00
if fncall . err != nil {
2023-10-17 18:21:59 +00:00
if stack . err == nil {
stack . err = fncall . err
} else {
fncallLog ( "additional fncall error: %v" , fncall . err )
}
return
2019-05-09 15:29:58 +00:00
}
if fncall . panicvar != nil {
2023-10-17 18:21:59 +00:00
if stack . err == nil {
stack . err = fncallPanicErr { fncall . panicvar }
} else {
fncallLog ( "additional fncall panic: %v" , fncall . panicvar )
}
return
2019-05-09 15:29:58 +00:00
}
switch len ( fncall . retvars ) {
case 0 :
2019-08-11 20:56:16 +00:00
r := newVariable ( "" , 0 , nil , scope . BinInfo , nil )
2019-05-09 15:29:58 +00:00
r . loaded = true
r . Unreadable = errors . New ( "no return values" )
2023-10-17 18:21:59 +00:00
stack . push ( r )
2019-05-09 15:29:58 +00:00
case 1 :
2023-10-17 18:21:59 +00:00
stack . push ( fncall . retvars [ 0 ] )
2019-05-09 15:29:58 +00:00
default :
// create a fake variable without address or type to return multiple values
2019-08-11 20:56:16 +00:00
r := newVariable ( "" , 0 , nil , scope . BinInfo , nil )
2019-05-09 15:29:58 +00:00
r . loaded = true
r . Children = make ( [ ] Variable , len ( fncall . retvars ) )
for i := range fncall . retvars {
r . Children [ i ] = * fncall . retvars [ i ]
}
2023-10-17 18:21:59 +00:00
stack . push ( r )
2019-05-09 15:29:58 +00:00
}
}
// fncallPanicErr is the error returned if a called function panics
type fncallPanicErr struct {
	// panicVar holds the value passed to panic by the called function.
	panicVar *Variable
}
func ( err fncallPanicErr ) Error ( ) string {
2021-04-12 21:57:39 +00:00
return "panic calling a function"
2018-05-04 17:31:45 +00:00
}
func fncallLog ( fmtstr string , args ... interface { } ) {
2019-03-27 21:58:36 +00:00
logflags . FnCallLogger ( ) . Infof ( fmtstr , args ... )
2018-05-04 17:31:45 +00:00
}
// writePointer writes val as an architecture pointer at addr in mem.
func writePointer ( bi * BinaryInfo , mem MemoryReadWriter , addr , val uint64 ) error {
ptrbuf := make ( [ ] byte , bi . Arch . PtrSize ( ) )
// TODO: use target architecture endianness instead of LittleEndian
switch len ( ptrbuf ) {
case 4 :
binary . LittleEndian . PutUint32 ( ptrbuf , uint32 ( val ) )
case 8 :
binary . LittleEndian . PutUint64 ( ptrbuf , val )
default :
panic ( fmt . Errorf ( "unsupported pointer size %d" , len ( ptrbuf ) ) )
}
2020-09-09 17:36:15 +00:00
_ , err := mem . WriteMemory ( addr , ptrbuf )
2018-05-04 17:31:45 +00:00
return err
}
// callOP simulates a call instruction on the given thread:
// * pushes the current value of PC on the stack (adjusting SP)
// * changes the value of PC to callAddr
// Note: regs are NOT updated!
func callOP ( bi * BinaryInfo , thread Thread , regs Registers , callAddr uint64 ) error {
2022-05-03 17:46:24 +00:00
switch bi . Arch . Name {
case "amd64" :
sp := regs . SP ( )
// push PC on the stack
sp -= uint64 ( bi . Arch . PtrSize ( ) )
if err := setSP ( thread , sp ) ; err != nil {
return err
}
if err := writePointer ( bi , thread . ProcessMemory ( ) , sp , regs . PC ( ) ) ; err != nil {
return err
}
return setPC ( thread , callAddr )
2023-09-21 17:39:57 +00:00
case "arm64" , "ppc64le" :
2022-05-03 17:46:24 +00:00
if err := setLR ( thread , regs . PC ( ) ) ; err != nil {
return err
}
return setPC ( thread , callAddr )
2023-09-21 17:39:57 +00:00
2022-05-03 17:46:24 +00:00
default :
panic ( "not implemented" )
2018-05-04 17:31:45 +00:00
}
}
2019-05-30 15:08:37 +00:00
// funcCallEvalFuncExpr evaluates expr.Fun (already on top of the evaluation
// stack) and fills in fncall with the function that we're trying to call,
// its formal arguments, receiver and closure address, checking that the
// call site provides the right number of arguments.
func funcCallEvalFuncExpr(scope *EvalScope, stack *evalStack, fncall *functionCallState) error {
	bi := scope.BinInfo

	fnvar := stack.peek()
	if fnvar.Kind != reflect.Func {
		return fmt.Errorf("expression %q is not a function", exprToString(fncall.expr.Fun))
	}
	fnvar.loadValue(LoadConfig{false, 0, 0, 0, 0, 0})
	if fnvar.Unreadable != nil {
		return fnvar.Unreadable
	}
	if fnvar.Base == 0 {
		return errors.New("nil pointer dereference")
	}
	fncall.fn = bi.PCToFunc(fnvar.Base)
	if fncall.fn == nil {
		return fmt.Errorf("could not find DIE for function %q", exprToString(fncall.expr.Fun))
	}
	if !fncall.fn.cu.isgo {
		return errNotAGoFunction
	}
	fncall.closureAddr = fnvar.closureAddr

	var err error
	fncall.argFrameSize, fncall.formalArgs, err = funcCallArgs(fncall.fn, bi, false)
	if err != nil {
		return err
	}

	argnum := len(fncall.expr.Args)

	// If the function variable has a child then that child is the method
	// receiver. However, if the method receiver is not being used (e.g.
	// func (_ X) Foo()) then it will not actually be listed as a formal
	// argument. Ensure that we are really off by 1 to add the receiver to
	// the function call.
	if len(fnvar.Children) > 0 && argnum == (len(fncall.formalArgs)-1) {
		argnum++
		fncall.receiver = &fnvar.Children[0]
		fncall.receiver.Name = exprToString(fncall.expr.Fun)
	}

	if argnum > len(fncall.formalArgs) {
		return errTooManyArguments
	}
	if argnum < len(fncall.formalArgs) {
		return errNotEnoughArguments
	}

	return nil
}
// funcCallArg describes one formal argument of the function being called.
type funcCallArg struct {
	name       string
	typ        godwarf.Type
	off        int64         // offset within the argument frame (old ABI)
	dwarfEntry *godwarf.Tree // non-nil if Go 1.17+
	isret      bool          // true if this is a return value rather than an input argument
}
2023-10-17 18:21:59 +00:00
// funcCallCopyOneArg copies actualArg into the location of the formal
// argument formalArg inside the argument frame of the injected call,
// optionally performing the escape check first.
func funcCallCopyOneArg(scope *EvalScope, fncall *functionCallState, actualArg *Variable, formalArg *funcCallArg, thread Thread) error {
	if scope.callCtx.checkEscape {
		//TODO(aarzilli): only apply the escapeCheck to leaking parameters.
		err := allPointers(actualArg, formalArg.name, func(addr uint64, name string) error {
			if !pointerEscapes(addr, scope.g.stack, scope.callCtx.stacks) {
				return fmt.Errorf("cannot use %s as argument %s in function %s: stack object passed to escaping pointer: %s", actualArg.Name, formalArg.name, fncall.fn.Name, name)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	//TODO(aarzilli): automatic wrapping in interfaces for cases not handled
	// by convertToEface.

	// Determine where the formal argument lives in the injected call's frame.
	formalScope, err := GoroutineScope(scope.target, thread)
	if err != nil {
		return err
	}
	var formalArgVar *Variable
	if formalArg.dwarfEntry != nil {
		// Go 1.17+: resolve the argument location from its DWARF entry.
		var err error
		formalArgVar, err = extractVarInfoFromEntry(scope.target, formalScope.BinInfo, formalScope.image(), formalScope.Regs, formalScope.Mem, formalArg.dwarfEntry, 0)
		if err != nil {
			return err
		}
	} else {
		// Old ABI: the argument lives at a fixed offset from the CFA.
		formalArgVar = newVariable(formalArg.name, uint64(formalArg.off+formalScope.Regs.CFA), formalArg.typ, scope.BinInfo, scope.Mem)
	}
	if err := scope.setValue(formalArgVar, actualArg, actualArg.Name); err != nil {
		return err
	}
	return nil
}
// funcCallArgs reads the DWARF description of fn and returns its formal
// arguments (optionally including return values) along with the size of the
// argument frame needed to call it.
func funcCallArgs(fn *Function, bi *BinaryInfo, includeRet bool) (argFrameSize int64, formalArgs []funcCallArg, err error) {
	dwarfTree, err := fn.cu.image.getDwarfTree(fn.offset)
	if err != nil {
		return 0, nil, fmt.Errorf("DWARF read error: %v", err)
	}

	if bi.regabi && fn.Optimized() {
		if runtimeWhitelist[fn.Name] {
			runtimeOptimizedWorkaround(bi, fn.cu.image, dwarfTree)
		} else {
			// Debug info for function arguments on optimized functions is currently
			// too incomplete to attempt injecting calls to arbitrary optimized
			// functions.
			// Prior to regabi we could do this because the ABI was simple enough to
			// manually encode it in Delve.
			// Runtime.mallocgc is an exception, we specifically patch it's DIE to be
			// correct for call injection purposes.
			return 0, nil, fmt.Errorf("can not call optimized function %s when regabi is in use", fn.Name)
		}
	}

	varEntries := reader.Variables(dwarfTree, fn.Entry, int(^uint(0)>>1), reader.VariablesSkipInlinedSubroutines)

	// typechecks arguments, calculates argument frame size
	for _, entry := range varEntries {
		if entry.Tag != dwarf.TagFormalParameter {
			continue
		}
		argname, typ, err := readVarEntry(entry.Tree, fn.cu.image)
		if err != nil {
			return 0, nil, err
		}
		typ = resolveTypedef(typ)

		var formalArg *funcCallArg
		if bi.regabi {
			formalArg, err = funcCallArgRegABI(fn, bi, entry, argname, typ, &argFrameSize)
		} else {
			formalArg, err = funcCallArgOldABI(fn, bi, entry, argname, typ, &argFrameSize)
		}
		if err != nil {
			return 0, nil, err
		}
		if !formalArg.isret || includeRet {
			formalArgs = append(formalArgs, *formalArg)
		}
	}

	if bi.regabi {
		// The argument frame size is computed conservatively, assuming that
		// there's space for each argument on the stack even if its passed in
		// registers. Unfortunately this isn't quite enough because the register
		// assignment algorithm Go uses can result in an amount of additional
		// space used due to alignment requirements, bounded by the number of argument registers.
		// Because we currently don't have an easy way to obtain the frame size,
		// let's be even more conservative.
		// A safe lower-bound on the size of the argument frame includes space for
		// each argument plus the total bytes of register arguments.
		// This is derived from worst-case alignment padding of up to
		// (pointer-word-bytes - 1) per argument passed in registers.
		// See: https://github.com/go-delve/delve/pull/2451#discussion_r665761531
		// TODO: Make this generic for other platforms.
		argFrameSize = alignAddr(argFrameSize, 8)
		argFrameSize += int64(bi.Arch.maxRegArgBytes)
	}

	// Sort arguments by their offset within the argument frame.
	sort.Slice(formalArgs, func(i, j int) bool {
		return formalArgs[i].off < formalArgs[j].off
	})

	return argFrameSize, formalArgs, nil
}
2023-02-14 17:38:15 +00:00
// funcCallArgOldABI determines the offset of formal argument argname within
// the argument frame for targets using the old (stack-only) ABI, updating
// *pargFrameSize to cover it.
func funcCallArgOldABI(fn *Function, bi *BinaryInfo, entry reader.Variable, argname string, typ godwarf.Type, pargFrameSize *int64) (*funcCallArg, error) {
	// Execute the argument's DWARF location expression using a synthetic CFA
	// value, then subtract it back out to obtain the CFA-relative offset.
	const CFA = 0x1000
	var off int64

	locprog, _, err := bi.locationExpr(entry, dwarf.AttrLocation, fn.Entry)
	if err != nil {
		err = fmt.Errorf("could not get argument location of %s: %v", argname, err)
	} else {
		var pieces []op.Piece
		off, pieces, err = op.ExecuteStackProgram(op.DwarfRegisters{CFA: CFA, FrameBase: CFA}, locprog, bi.Arch.PtrSize(), nil)
		if err != nil {
			err = fmt.Errorf("unsupported location expression for argument %s: %v", argname, err)
		}
		if pieces != nil {
			// Composite locations (DW_OP_piece) cannot be written to directly.
			err = fmt.Errorf("unsupported location expression for argument %s (uses DW_OP_piece)", argname)
		}
		off -= CFA
	}
	if err != nil {
		// With Go version 1.12 or later we can trust that the arguments appear
		// in the same order as declared, which means we can calculate their
		// address automatically.
		// With this we can call optimized functions (which sometimes do not have
		// an argument address, due to a compiler bug) as well as runtime
		// functions (which are always optimized).
		off = *pargFrameSize
		off = alignAddr(off, typ.Align())
	}

	// Grow the argument frame to include this argument.
	if e := off + typ.Size(); e > *pargFrameSize {
		*pargFrameSize = e
	}

	isret, _ := entry.Val(dwarf.AttrVarParam).(bool)
	return &funcCallArg{name: argname, typ: typ, off: off, isret: isret}, nil
}
// funcCallArgRegABI describes formal argument argname for targets using the
// register-based calling convention: the argument's actual location is
// resolved later from its DWARF entry, here we only conservatively account
// for the stack space it would occupy under ABI0.
func funcCallArgRegABI(fn *Function, bi *BinaryInfo, entry reader.Variable, argname string, typ godwarf.Type, pargFrameSize *int64) (*funcCallArg, error) {
	// Conservatively calculate the full stack argument space for ABI0.
	frame := alignAddr(*pargFrameSize, typ.Align())
	*pargFrameSize = frame + typ.Size()
	isret, _ := entry.Val(dwarf.AttrVarParam).(bool)
	return &funcCallArg{
		name:       argname,
		typ:        typ,
		dwarfEntry: entry.Tree,
		isret:      isret,
	}, nil
}
2019-09-25 17:23:02 +00:00
// alignAddr rounds addr up to the next multiple of align.
// align must be a power of 2.
func alignAddr(addr, align int64) int64 {
	mask := align - 1
	return (addr + mask) &^ mask
}
2024-04-19 15:44:47 +00:00
// allPointers calls f on every pointer contained in v.
// It is used during call injection to find pointers that may need to be
// pinned; it returns an error if any variable visited is unreadable, or
// the first error returned by f.
func allPointers(v *Variable, name string, f func(addr uint64, name string) error) error {
	if v.Unreadable != nil {
		return fmt.Errorf("escape check for %s failed, variable unreadable: %v", name, v.Unreadable)
	}
	switch v.Kind {
	case reflect.Ptr, reflect.UnsafePointer:
		var w *Variable
		if len(v.Children) == 1 {
			// this branch is here to support pointers constructed with typecasts from ints or the '&' operator
			w = &v.Children[0]
		} else {
			w = v.maybeDereference()
		}
		return f(w.Addr, name)
	case reflect.Chan, reflect.String, reflect.Slice:
		// these all carry a single pointer to their backing store in Base
		return f(v.Base, name)
	case reflect.Map:
		// a map value is a single pointer: dereference through the map's
		// typedef and report the resulting address
		sv := v.clone()
		sv.RealType = resolveTypedef(&(v.RealType.(*godwarf.MapType).TypedefType))
		sv = sv.maybeDereference()
		return f(sv.Addr, name)
	case reflect.Interface:
		// visit the interface's underlying runtime representation as a struct
		sv := v.clone()
		sv.RealType = resolveTypedef(&(v.RealType.(*godwarf.InterfaceType).TypedefType))
		sv = sv.maybeDereference()
		sv.Kind = reflect.Struct
		return allPointers(sv, name, f)
	case reflect.Struct:
		// recurse on every field
		t := v.RealType.(*godwarf.StructType)
		for _, field := range t.Field {
			fv, _ := v.toField(field)
			if err := allPointers(fv, fmt.Sprintf("%s.%s", name, field.Name), f); err != nil {
				return err
			}
		}
	case reflect.Array:
		// recurse on every element
		for i := int64(0); i < v.Len; i++ {
			sv, _ := v.sliceAccess(int(i))
			if err := allPointers(sv, fmt.Sprintf("%s[%d]", name, i), f); err != nil {
				return err
			}
		}
	case reflect.Func:
		// a func value is the address of its funcval
		if err := f(v.funcvalAddr(), name); err != nil {
			return err
		}
	case reflect.Complex64, reflect.Complex128, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64:
		// nothing to do
	default:
		panic(fmt.Errorf("not implemented: %s", v.Kind))
	}
	return nil
}
2024-04-19 15:44:47 +00:00
func pointerEscapes ( addr uint64 , stack stack , stacks [ ] stack ) bool {
2023-11-14 15:36:55 +00:00
if addr >= stack . lo && addr < stack . hi {
2024-04-19 15:44:47 +00:00
return false
2018-05-04 17:31:45 +00:00
}
2024-04-19 15:44:47 +00:00
for _ , stack := range stacks {
if addr >= stack . lo && addr < stack . hi {
return false
}
}
return true
2018-05-04 17:31:45 +00:00
}
// Values of the call injection protocol register (see
// functionCallState.protocolReg) observed by funcCallStep when the target
// stops at one of the protocol's breakpoints; see the comments for
// runtime·debugCallVn in $GOROOT/src/runtime/asm_amd64.s.
const (
	debugCallRegPrecheckFailed   = 8  // precheck failed, an error string can be read from the stack
	debugCallRegCompleteCall     = 0  // the target is ready to execute the injected call
	debugCallRegReadReturn       = 1  // the call finished, return values can be read
	debugCallRegReadPanic        = 2  // the call panicked, the panic value can be read from the stack
	debugCallRegRestoreRegisters = 16 // last step of the protocol, registers must be restored
)
2019-05-09 15:29:58 +00:00
// funcCallStep executes one step of the function call injection protocol.
2023-10-17 18:21:59 +00:00
func funcCallStep ( callScope * EvalScope , stack * evalStack , thread Thread ) bool {
2019-05-30 15:08:37 +00:00
p := callScope . callCtx . p
2018-05-04 17:31:45 +00:00
bi := p . BinInfo ( )
2023-10-17 18:21:59 +00:00
fncall := stack . fncallPeek ( )
2018-05-04 17:31:45 +00:00
2020-05-13 18:56:50 +00:00
regs , err := thread . Registers ( )
if err != nil {
fncall . err = err
return true
}
2018-05-04 17:31:45 +00:00
2023-10-17 18:21:59 +00:00
regval := bi . Arch . RegistersToDwarfRegisters ( 0 , regs ) . Uint64Val ( fncall . protocolReg )
2018-05-04 17:31:45 +00:00
if logflags . FnCall ( ) {
loc , _ := thread . Location ( )
var pc uint64
var fnname string
if loc != nil {
pc = loc . PC
if loc . Fn != nil {
fnname = loc . Fn . Name
}
}
2023-09-19 16:34:34 +00:00
fncallLog ( "function call interrupt gid=%d (original) thread=%d regval=%#x (PC=%#x in %s %s:%d)" , callScope . g . ID , thread . ThreadID ( ) , regval , pc , fnname , loc . File , loc . Line )
2018-05-04 17:31:45 +00:00
}
2021-07-08 15:47:53 +00:00
switch regval {
2022-05-03 17:46:24 +00:00
case debugCallRegPrecheckFailed : // 8
2023-10-17 18:21:59 +00:00
stack . callInjectionContinue = true
2022-05-03 17:46:24 +00:00
archoff := uint64 ( 0 )
if bi . Arch . Name == "arm64" {
archoff = 8
2023-09-21 17:39:57 +00:00
} else if bi . Arch . Name == "ppc64le" {
archoff = 40
2022-05-03 17:46:24 +00:00
}
2018-05-04 17:31:45 +00:00
// get error from top of the stack and return it to user
2022-05-03 17:46:24 +00:00
errvar , err := readStackVariable ( p , thread , regs , archoff , "string" , loadFullValue )
2018-05-04 17:31:45 +00:00
if err != nil {
fncall . err = fmt . Errorf ( "could not get precheck error reason: %v" , err )
break
}
errvar . Name = "err"
fncall . err = fmt . Errorf ( "%v" , constant . StringVal ( errvar . Value ) )
2022-05-03 17:46:24 +00:00
case debugCallRegCompleteCall : // 0
2020-07-28 16:19:51 +00:00
p . fncallForG [ callScope . g . ID ] . startThreadID = 0
2022-05-03 17:46:24 +00:00
case debugCallRegRestoreRegisters : // 16
2018-05-04 17:31:45 +00:00
// runtime requests that we restore the registers (all except pc and sp),
// this is also the last step of the function call protocol.
pc , sp := regs . PC ( ) , regs . SP ( )
if err := thread . RestoreRegisters ( fncall . savedRegs ) ; err != nil {
fncall . err = fmt . Errorf ( "could not restore registers: %v" , err )
}
2021-03-04 18:28:28 +00:00
if err := setPC ( thread , pc ) ; err != nil {
2018-05-04 17:31:45 +00:00
fncall . err = fmt . Errorf ( "could not restore PC: %v" , err )
}
2021-03-04 18:28:28 +00:00
if err := setSP ( thread , sp ) ; err != nil {
2018-05-04 17:31:45 +00:00
fncall . err = fmt . Errorf ( "could not restore SP: %v" , err )
}
2023-09-19 16:34:34 +00:00
fncallLog ( "stepping thread %d" , thread . ThreadID ( ) )
2023-10-17 18:21:59 +00:00
if err := stepInstructionOut ( callScope . callCtx . grp , p , thread , fncall . debugCallName , fncall . debugCallName ) ; err != nil {
fncall . err = fmt . Errorf ( "could not step out of %s: %v" , fncall . debugCallName , err )
2018-05-04 17:31:45 +00:00
}
2022-05-05 15:41:40 +00:00
if bi . Arch . Name == "amd64" {
// The tail of debugCallV2 corrupts the state of RFLAGS, we must restore
// it one extra time after stepping out of it.
// See https://github.com/go-delve/delve/issues/2985 and
// TestCallInjectionFlagCorruption
rflags := bi . Arch . RegistersToDwarfRegisters ( 0 , fncall . savedRegs ) . Uint64Val ( regnum . AMD64_Rflags )
err := thread . SetReg ( regnum . AMD64_Rflags , op . DwarfRegisterFromUint64 ( rflags ) )
if err != nil {
fncall . err = fmt . Errorf ( "could not restore RFLAGS register: %v" , err )
}
}
2019-05-09 15:29:58 +00:00
return true
2018-05-04 17:31:45 +00:00
2022-05-03 17:46:24 +00:00
case debugCallRegReadReturn : // 1
2018-05-04 17:31:45 +00:00
// read return arguments from stack
2023-10-17 18:21:59 +00:00
stack . callInjectionContinue = true
if fncall . panicvar != nil || fncall . err != nil {
2018-05-04 17:31:45 +00:00
break
}
2021-07-02 16:37:55 +00:00
retScope , err := ThreadScope ( p , thread )
2018-05-04 17:31:45 +00:00
if err != nil {
fncall . err = fmt . Errorf ( "could not get return values: %v" , err )
break
}
// pretend we are still inside the function we called
2019-05-30 15:08:37 +00:00
fakeFunctionEntryScope ( retScope , fncall . fn , int64 ( regs . SP ( ) ) , regs . SP ( ) - uint64 ( bi . Arch . PtrSize ( ) ) )
2021-08-23 08:03:54 +00:00
var flags localsFlags
flags |= localsNoDeclLineCheck // if the function we are calling is an autogenerated stub then declaration lines have no meaning
if ! bi . regabi {
flags |= localsTrustArgOrder
}
2018-05-04 17:31:45 +00:00
2024-06-24 20:04:06 +00:00
fncall . retvars , err = retScope . Locals ( flags , "" )
2018-05-04 17:31:45 +00:00
if err != nil {
fncall . err = fmt . Errorf ( "could not get return values: %v" , err )
break
}
fncall . retvars = filterVariables ( fncall . retvars , func ( v * Variable ) bool {
return ( v . Flags & VariableReturnArgument ) != 0
} )
2024-10-04 17:44:57 +00:00
if ! fncall . doPinning {
loadValues ( fncall . retvars , callScope . callCtx . retLoadCfg )
}
2019-07-16 20:12:16 +00:00
for _ , v := range fncall . retvars {
v . Flags |= VariableFakeAddress
}
2018-05-04 17:31:45 +00:00
2024-10-04 17:44:57 +00:00
if fncall . doPinning {
stack . callInjectionContinue = false
for _ , v := range fncall . retvars {
saveVariable ( v )
allPointers ( v , "" , func ( addr uint64 , _ string ) error {
if addr != 0 && pointerEscapes ( addr , callScope . g . stack , callScope . callCtx . stacks ) {
fncall . addrsToPin = append ( fncall . addrsToPin , addr )
}
return nil
} )
2022-05-03 17:46:24 +00:00
}
2024-10-04 17:44:57 +00:00
slices . Sort ( fncall . addrsToPin )
fncall . addrsToPin = slices . Compact ( fncall . addrsToPin )
return false // will continue with evalop.CallInjectionComplete2
2022-05-03 17:46:24 +00:00
}
2020-07-28 16:19:51 +00:00
2024-10-04 17:44:57 +00:00
callInjectionComplete2 ( callScope , bi , fncall , regs , thread )
2022-05-03 17:46:24 +00:00
case debugCallRegReadPanic : // 2
2018-05-04 17:31:45 +00:00
// read panic value from stack
2023-10-17 18:21:59 +00:00
stack . callInjectionContinue = true
2022-05-03 17:46:24 +00:00
archoff := uint64 ( 0 )
if bi . Arch . Name == "arm64" {
archoff = 8
2023-09-21 17:39:57 +00:00
} else if bi . Arch . Name == "ppc64le" {
archoff = 32
2022-05-03 17:46:24 +00:00
}
fncall . panicvar , err = readStackVariable ( p , thread , regs , archoff , "interface {}" , callScope . callCtx . retLoadCfg )
2018-05-04 17:31:45 +00:00
if err != nil {
fncall . err = fmt . Errorf ( "could not get panic: %v" , err )
break
}
fncall . panicvar . Name = "~panic"
default :
2021-07-08 15:47:53 +00:00
// Got an unknown protocol register value, this is probably bad but the safest thing
2018-05-04 17:31:45 +00:00
// possible is to ignore it and hope it didn't matter.
2023-10-17 18:21:59 +00:00
stack . callInjectionContinue = true
2021-07-08 15:47:53 +00:00
fncallLog ( "unknown value of protocol register %#x" , regval )
2018-05-04 17:31:45 +00:00
}
2019-05-09 15:29:58 +00:00
return false
2018-05-04 17:31:45 +00:00
}
2024-10-04 17:44:57 +00:00
// callInjectionComplete2 records the stack of the goroutine that executed
// the injected call (for later escape checks) and, on link-register
// architectures, restores LR by reading it back from the stack.
func callInjectionComplete2(callScope *EvalScope, bi *BinaryInfo, fncall *functionCallState, regs Registers, thread Thread) {
	// Store the stack span of the currently running goroutine (which in Go >=
	// 1.15 might be different from the original injection goroutine) so that
	// later on we can use it to perform the escapeCheck
	threadg, _ := GetG(thread)
	if threadg != nil {
		callScope.callCtx.stacks = append(callScope.callCtx.stacks, threadg.stack)
	}
	if bi.Arch.Name != "arm64" && bi.Arch.Name != "ppc64le" {
		return
	}
	oldlr, err := readUintRaw(thread.ProcessMemory(), regs.SP(), int64(bi.Arch.PtrSize()))
	if err != nil {
		fncall.err = fmt.Errorf("could not restore LR: %v", err)
		return
	}
	if err = setLR(thread, oldlr); err != nil {
		fncall.err = fmt.Errorf("could not restore LR: %v", err)
	}
}
2023-10-17 18:21:59 +00:00
// evalCallInjectionSetTarget implements the evalop.CallInjectionSetTarget
// operation: it determines the entry point of the target function
// (evaluating the function expression if it wasn't already resolved by
// evalop.CallInjectionStart), records the state needed to undo the
// injection, diverts execution to the target's entry point and copies the
// receiver argument, if there is one.
func (scope *EvalScope) evalCallInjectionSetTarget(op *evalop.CallInjectionSetTarget, stack *evalStack, thread Thread) {
	fncall := stack.fncallPeek()
	if !fncall.hasDebugPinner && (fncall.fn == nil || fncall.receiver != nil || fncall.closureAddr != 0) {
		// The target wasn't fully resolved yet (function pointer, closure or
		// method with a receiver): evaluate the function expression now.
		stack.err = funcCallEvalFuncExpr(scope, stack, fncall)
		if stack.err != nil {
			return
		}
	}
	stack.pop() // target function, consumed by funcCallEvalFuncExpr either above or in evalop.CallInjectionStart
	regs, err := thread.Registers()
	if err != nil {
		stack.err = err
		return
	}
	if fncall.closureAddr != 0 {
		// When calling a function pointer we must set the DX register to the
		// address of the function pointer itself.
		setClosureReg(thread, fncall.closureAddr)
	}
	// Save the current PC (and LR on arm64/ppc64le) so the injection can be
	// undone later.
	undo := new(undoInjection)
	undo.oldpc = regs.PC()
	if scope.BinInfo.Arch.Name == "arm64" || scope.BinInfo.Arch.Name == "ppc64le" {
		undo.oldlr = regs.LR()
	}
	callOP(scope.BinInfo, thread, regs, fncall.fn.Entry)
	fncall.undoInjection = undo
	if fncall.receiver != nil {
		// Copy the receiver into the first formal argument, then drop that
		// argument from the list of formals still to be filled.
		err := funcCallCopyOneArg(scope, fncall, fncall.receiver, &fncall.formalArgs[0], thread)
		if err != nil {
			stack.err = fmt.Errorf("could not set call receiver: %v", err)
			return
		}
		fncall.formalArgs = fncall.formalArgs[1:]
	}
}
2022-05-03 17:46:24 +00:00
func readStackVariable ( t * Target , thread Thread , regs Registers , off uint64 , typename string , loadCfg LoadConfig ) ( * Variable , error ) {
2018-05-04 17:31:45 +00:00
bi := thread . BinInfo ( )
2021-07-02 16:37:55 +00:00
scope , err := ThreadScope ( t , thread )
2018-05-04 17:31:45 +00:00
if err != nil {
return nil , err
}
typ , err := bi . findType ( typename )
if err != nil {
return nil , err
}
2022-05-03 17:46:24 +00:00
v := newVariable ( "" , regs . SP ( ) + off , typ , scope . BinInfo , scope . Mem )
2018-05-04 17:31:45 +00:00
v . loadValue ( loadCfg )
if v . Unreadable != nil {
return nil , v . Unreadable
}
2019-07-16 20:12:16 +00:00
v . Flags |= VariableFakeAddress
2018-05-04 17:31:45 +00:00
return v , nil
}
2022-10-05 15:17:53 +00:00
// fakeFunctionEntryScope alters scope to pretend that we are at the entry point of
// fn and CFA and SP are the ones passed as argument.
// This function is used to create a scope for a call frame that doesn't
// exist anymore, to read the return variables of an injected function call,
// or after a stepout command.
func fakeFunctionEntryScope(scope *EvalScope, fn *Function, cfa int64, sp uint64) error {
	// Point the scope at fn's entry, both in the source-level fields and in
	// the simulated register set.
	scope.PC = fn.Entry
	scope.Fn = fn
	scope.File, scope.Line = scope.BinInfo.EntryLineForFunc(fn)

	scope.Regs.CFA = cfa
	scope.Regs.Reg(scope.Regs.SPRegNum).Uint64Val = sp
	scope.Regs.Reg(scope.Regs.PCRegNum).Uint64Val = fn.Entry

	// Re-read fn's DWARF entry and evaluate its frame base attribute at the
	// (fake) entry PC, so variable lookups use the right frame base.
	fn.cu.image.dwarfReader.Seek(fn.offset)
	e, err := fn.cu.image.dwarfReader.Next()
	if err != nil {
		return err
	}
	scope.Regs.FrameBase, _, _, _ = scope.BinInfo.Location(e, dwarf.AttrFrameBase, scope.PC, scope.Regs, nil)
	return nil
}
2024-04-19 15:44:47 +00:00
// callInjectionStartSpecial starts a call injection for one of the
// functions called internally by the evaluator (identified by name in
// op.FnName, with op.ArgAst as arguments) rather than by a user
// expression.
// Returns true if the injection started successfully; on failure it sets
// stack.err and returns false.
func (scope *EvalScope) callInjectionStartSpecial(stack *evalStack, op *evalop.CallInjectionStartSpecial, curthread Thread) bool {
	fnv, err := scope.findGlobalInternal(op.FnName)
	if fnv == nil {
		// findGlobalInternal can return nil without an error; synthesize one.
		if err == nil {
			err = fmt.Errorf("function %s not found", op.FnName)
		}
		stack.err = err
		return false
	}
	stack.push(fnv)
	// Build a synthetic call expression for op.FnName and run the normal
	// call injection start on it.
	scope.evalCallInjectionStart(&evalop.CallInjectionStart{HasFunc: true, Node: &ast.CallExpr{
		Fun:  &ast.Ident{Name: op.FnName},
		Args: op.ArgAst,
	}}, stack)
	if stack.err == nil {
		stack.pop() // return value of evalop.CallInjectionStart
		return true
	}
	return false
}
2023-10-17 18:21:59 +00:00
2024-04-19 15:44:47 +00:00
func ( scope * EvalScope ) convertAllocToString ( stack * evalStack ) {
mallocv := stack . pop ( )
v := stack . pop ( )
2024-10-04 17:44:57 +00:00
mallocv . loadValue ( loadFullValue )
2024-04-19 15:44:47 +00:00
if mallocv . Unreadable != nil {
stack . err = mallocv . Unreadable
return
}
2023-10-17 18:21:59 +00:00
2024-04-19 15:44:47 +00:00
if mallocv . DwarfType . String ( ) != "*void" {
stack . err = fmt . Errorf ( "unexpected return type for mallocgc call: %v" , mallocv . DwarfType . String ( ) )
return
}
2023-10-17 18:21:59 +00:00
2024-04-19 15:44:47 +00:00
if len ( mallocv . Children ) != 1 {
stack . err = errors . New ( "internal error, could not interpret return value of mallocgc call" )
return
2019-06-17 16:51:29 +00:00
}
2023-10-17 18:21:59 +00:00
2024-04-19 15:44:47 +00:00
v . Base = mallocv . Children [ 0 ] . Addr
_ , stack . err = scope . Mem . WriteMemory ( v . Base , [ ] byte ( constant . StringVal ( v . Value ) ) )
stack . push ( v )
2019-06-17 16:51:29 +00:00
}
2019-06-30 17:44:30 +00:00
2020-07-28 16:19:51 +00:00
func isCallInjectionStop ( t * Target , thread Thread , loc * Location ) bool {
2019-06-30 17:44:30 +00:00
if loc . Fn == nil {
return false
}
2020-07-28 16:19:51 +00:00
if ! strings . HasPrefix ( loc . Fn . Name , debugCallFunctionNamePrefix1 ) && ! strings . HasPrefix ( loc . Fn . Name , debugCallFunctionNamePrefix2 ) {
return false
}
2020-12-10 17:03:11 +00:00
if loc . PC == loc . Fn . Entry {
// call injection just started, did not make any progress before being interrupted by a concurrent breakpoint.
return false
}
2022-05-03 17:46:24 +00:00
off := int64 ( 0 )
if thread . BinInfo ( ) . Arch . breakInstrMovesPC {
off = - int64 ( len ( thread . BinInfo ( ) . Arch . breakpointInstruction ) )
}
text , err := disassembleCurrentInstruction ( t , thread , off )
2023-07-13 18:30:32 +00:00
if err != nil || len ( text ) == 0 {
2020-07-28 16:19:51 +00:00
return false
}
return text [ 0 ] . IsHardBreak ( )
2019-06-30 17:44:30 +00:00
}
// callInjectionProtocol is the function called from Continue to progress
// the injection protocol for all threads.
// Returns true if a call injection terminated
func callInjectionProtocol(t *Target, trapthread Thread, threads []Thread) (done bool, err error) {
	if len(t.fncallForG) == 0 {
		// we aren't injecting any calls, no need to check the threads.
		return false, nil
	}
	// resume (below) can change t.currentThread; restore it before
	// returning.
	currentThread := t.currentThread
	defer func() {
		t.currentThread = currentThread
	}()
	for _, thread := range threads {
		loc, err := thread.Location()
		if err != nil {
			continue
		}
		// Only consider the thread that trapped and threads reporting a
		// software exception (SoftExc).
		if (thread.ThreadID() != trapthread.ThreadID()) && !thread.SoftExc() {
			continue
		}
		if !isCallInjectionStop(t, thread, loc) {
			continue
		}
		regs, _ := thread.Registers()
		fncallLog("call injection found thread=%d %s %s:%d PC=%#x SP=%#x", thread.ThreadID(), loc.Fn.Name, loc.File, loc.Line, regs.PC(), regs.SP())
		g, callinj, err := findCallInjectionStateForThread(t, thread)
		if err != nil {
			return false, err
		}
		// If the breakpoint instruction does not move the PC, advance past it
		// manually before resuming the protocol.
		arch := thread.BinInfo().Arch
		if !arch.breakInstrMovesPC {
			setPC(thread, loc.PC+uint64(len(arch.breakpointInstruction)))
		}
		fncallLog("step for injection on goroutine %d (current) thread=%d (location %s)", g.ID, thread.ThreadID(), loc.Fn.Name)
		t.currentThread = thread
		callinj.evalStack.resume(g)
		if !callinj.evalStack.callInjectionContinue {
			// The injection no longer needs to continue: finish the
			// expression evaluation for this goroutine.
			err := finishEvalExpressionWithCalls(t, g, callinj.evalStack)
			if err != nil {
				return done, err
			}
			done = true
		}
	}
	return done, nil
}
2020-07-28 16:19:51 +00:00
// findCallInjectionStateForThread returns the goroutine currently running
// on thread and the call injection state associated with it.
// The state is normally registered in t.fncallForG under the goroutine's
// ID; if it isn't found there it is recovered by matching the thread that
// started the injection (the protocol can switch goroutines in Go 1.15
// and later).
func findCallInjectionStateForThread(t *Target, thread Thread) (*G, *callInjection, error) {
	g, err := GetG(thread)
	if err != nil {
		return nil, nil, fmt.Errorf("could not determine running goroutine for thread %#x currently executing the function call injection protocol: %v", thread.ThreadID(), err)
	}
	fncallLog("findCallInjectionStateForThread thread=%d goroutine=%d", thread.ThreadID(), g.ID)
	notfound := func() error {
		return fmt.Errorf("could not recover call injection state for goroutine %d (thread %d)", g.ID, thread.ThreadID())
	}
	callinj := t.fncallForG[g.ID]
	if callinj != nil {
		if callinj.evalStack == nil {
			return nil, nil, notfound()
		}
		return g, callinj, nil
	}
	// In Go 1.15 and later the call injection protocol will switch to a
	// different goroutine.
	// Here we try to recover the injection goroutine by checking the injection
	// thread.
	for goid, callinj := range t.fncallForG {
		if callinj != nil && callinj.evalStack != nil && callinj.startThreadID != 0 && callinj.startThreadID == thread.ThreadID() {
			// Register the state under the new goroutine's ID as well.
			t.fncallForG[g.ID] = callinj
			fncallLog("goroutine %d is the goroutine executing the call injection started in goroutine %d", g.ID, goid)
			return g, callinj, nil
		}
	}
	return nil, nil, notfound()
}
2021-07-08 15:47:53 +00:00
// debugCallFunction searches for the debug call function in the binary and
// uses this search to detect the debug call version.
// Returns the debug call function and its version as an integer (the lowest
// valid version is 1) or nil and zero.
func debugCallFunction ( bi * BinaryInfo ) ( * Function , int ) {
for version := maxDebugCallVersion ; version >= 1 ; version -- {
name := debugCallFunctionNamePrefix2 + "V" + strconv . Itoa ( version )
2023-03-22 18:38:09 +00:00
fn := bi . lookupOneFunc ( name )
if fn != nil {
2021-07-08 15:47:53 +00:00
return fn , version
}
}
return nil , 0
}
// debugCallProtocolReg returns the register ID (as defined in pkg/dwarf/regnum)
// of the register used in the debug call protocol, given the debug call version.
// Also returns a bool indicating whether the version is supported.
2022-05-03 17:46:24 +00:00
func debugCallProtocolReg ( archName string , version int ) ( uint64 , bool ) {
switch archName {
case "amd64" :
var protocolReg uint64
switch version {
case 1 :
protocolReg = regnum . AMD64_Rax
case 2 :
protocolReg = regnum . AMD64_R12
default :
return 0 , false
}
return protocolReg , true
2023-09-21 17:39:57 +00:00
case "arm64" , "ppc64le" :
2022-05-03 17:46:24 +00:00
if version == 2 {
return regnum . ARM64_X0 + 20 , true
}
return 0 , false
2021-07-08 15:47:53 +00:00
default :
return 0 , false
}
}
2023-11-20 18:43:15 +00:00
// runtimeWhitelist is a list of functions in the runtime that we can call
// (through call injection) even if they are optimized.
var runtimeWhitelist = map[string]bool{
	"runtime.mallocgc":             true, // used to allocate memory in the target (e.g. string backing arrays, see convertAllocToString)
	evalop.DebugPinnerFunctionName: true, // used to obtain the pinner used to pin pointers during call injection
	"runtime.(*Pinner).Unpin":      true,
	"runtime.(*Pinner).Pin":        true,
}
2023-11-20 18:43:15 +00:00
// runtimeOptimizedWorkaround modifies the input DIE so that arguments and
// return variables have the appropriate registers for call injection.
// This function can not be called on arbitrary DIEs, it is only valid for
// the functions specified in runtimeWhitelist.
// In particular this will fail if any of the arguments of the function
// passed in input does not fit in an integer CPU register.
func runtimeOptimizedWorkaround(bi *BinaryInfo, image *Image, in *godwarf.Tree) {
	// Rewrite each DIE at most once; the cache records the trees that have
	// already been processed.
	if image.workaroundCache == nil {
		image.workaroundCache = make(map[dwarf.Offset]*godwarf.Tree)
	}
	if image.workaroundCache[in.Offset] == in {
		return
	}
	image.workaroundCache[in.Offset] = in

	// Assign each formal parameter the next integer argument register, in
	// declaration order; input arguments and return values are numbered
	// independently.
	curArg, curRet := 0, 0
	for _, child := range in.Children {
		if child.Tag == dwarf.TagFormalParameter {
			childEntry, ok := child.Entry.(*dwarf.Entry)
			if !ok {
				panic("internal error: bad DIE for runtimeOptimizedWorkaround")
			}
			isret, _ := child.Entry.Val(dwarf.AttrVarParam).(bool)
			var reg int
			if isret {
				reg = bi.Arch.argumentRegs[curRet]
				curRet++
			} else {
				reg = bi.Arch.argumentRegs[curArg]
				curArg++
			}

			// Build a single-byte DW_OP_regN location expression for the
			// chosen register.
			newlocfield := dwarf.Field{Attr: dwarf.AttrLocation, Val: []byte{byte(op.DW_OP_reg0) + byte(reg)}, Class: dwarf.ClassBlock}

			// Replace the existing location attribute, or add one if the DIE
			// had none.
			locfield := childEntry.AttrField(dwarf.AttrLocation)
			if locfield != nil {
				*locfield = newlocfield
			} else {
				childEntry.Field = append(childEntry.Field, newlocfield)
			}
		}
	}
}