delve/pkg/proc/core/core_test.go

package core

import (
	"bytes"
	"flag"
	"fmt"
	"go/constant"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"

	"github.com/derekparker/delve/pkg/goversion"
	"github.com/derekparker/delve/pkg/proc"
	"github.com/derekparker/delve/pkg/proc/test"
)
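
// buildMode is selected with the -test-buildmode flag; the only value
// supported besides the default is "pie", which builds the test fixtures as
// position-independent executables.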
var buildMode string

func TestMain(m *testing.M) {
	flag.StringVar(&buildMode, "test-buildmode", "", "selects build mode")
	// Flags must be parsed explicitly here; without this call buildMode is
	// still "" when it is checked below.
	flag.Parse()
	if buildMode != "" && buildMode != "pie" {
		fmt.Fprintf(os.Stderr, "unknown build mode %q", buildMode)
		os.Exit(1)
	}
	os.Exit(test.RunTestsWithFixtures(m))
}
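
// assertNoError fails the test immediately, reporting the caller's file and
// line, if err is non-nil.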
func assertNoError(err error, t testing.TB, s string) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fname := filepath.Base(file)
		t.Fatalf("failed assertion at %s:%d: %s - %s\n", fname, line, s, err)
	}
}
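
// TestSplicedReader checks that SplicedMemory overlays memory regions
// correctly: a region added later wins over an earlier one wherever the two
// overlap.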
func TestSplicedReader(t *testing.T) {
	data := []byte{}
	data2 := []byte{}
	for i := 0; i < 100; i++ {
		data = append(data, byte(i))
		data2 = append(data2, byte(i+100))
	}

	type region struct {
		data   []byte
		off    uintptr
		length uintptr
	}
	tests := []struct {
		name     string
		regions  []region
		readAddr uintptr
		readLen  int
		want     []byte
	}{
		{
			"Insert after",
			[]region{
				{data, 0, 1},
				{data2, 1, 1},
			},
			0,
			2,
			[]byte{0, 101},
		},
		{
			"Insert before",
			[]region{
				{data, 1, 1},
				{data2, 0, 1},
			},
			0,
			2,
			[]byte{100, 1},
		},
		{
			"Completely overwrite",
			[]region{
				{data, 1, 1},
				{data2, 0, 3},
			},
			0,
			3,
			[]byte{100, 101, 102},
		},
		{
			"Overwrite end",
			[]region{
				{data, 0, 2},
				{data2, 1, 2},
			},
			0,
			3,
			[]byte{0, 101, 102},
		},
		{
			"Overwrite start",
			[]region{
				{data, 0, 3},
				{data2, 0, 2},
			},
			0,
			3,
			[]byte{100, 101, 2},
		},
		{
			"Punch hole",
			[]region{
				{data, 0, 5},
				{data2, 1, 3},
			},
			0,
			5,
			[]byte{0, 101, 102, 103, 4},
		},
		{
			"Overlap two",
			[]region{
				{data, 10, 4},
				{data, 14, 4},
				{data2, 12, 4},
			},
			10,
			8,
			[]byte{10, 11, 112, 113, 114, 115, 16, 17},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			mem := &SplicedMemory{}
			for _, region := range test.regions {
				r := bytes.NewReader(region.data)
				mem.Add(&OffsetReaderAt{r, 0}, region.off, region.length)
			}
			got := make([]byte, test.readLen)
			n, err := mem.ReadMemory(got, test.readAddr)
			if n != test.readLen || err != nil || !reflect.DeepEqual(got, test.want) {
				t.Errorf("ReadAt = %v, %v, %v, want %v, %v, %v", n, err, got, test.readLen, nil, test.want)
			}
		})
	}
}
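
// withCoreFile builds the given fixture, runs it under bash with core dumps
// enabled and GOTRACEBACK=crash so that it leaves a core file behind, then
// opens that core file with OpenCore. The test is skipped if no core file is
// produced.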
func withCoreFile(t *testing.T, name, args string) *Process {
	// This is all very fragile and won't work on hosts with non-default core patterns.
	// Might be better to check in the binary and core?
	tempDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	test.PathsToRemove = append(test.PathsToRemove, tempDir)
	var buildFlags test.BuildFlags
	if buildMode == "pie" {
		buildFlags = test.BuildModePIE
	}
	fix := test.BuildFixture(name, buildFlags)
	bashCmd := fmt.Sprintf("cd %v && ulimit -c unlimited && GOTRACEBACK=crash %v %s", tempDir, fix.Path, args)
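	// The fixture is expected to crash and dump core, so the error returned
	// by Run is deliberately ignored.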
	exec.Command("bash", "-c", bashCmd).Run()
	cores, err := filepath.Glob(path.Join(tempDir, "core*"))
	switch {
	case err != nil || len(cores) > 1:
		t.Fatalf("Got %v, wanted one file named core* in %v", cores, tempDir)
	case len(cores) == 0:
		t.Skipf("core file was not produced, could not run test")
		return nil
	}
	corePath := cores[0]

	p, err := OpenCore(corePath, fix.Path, []string{})
	if err != nil {
		t.Errorf("ReadCore(%q) failed: %v", corePath, err)
		pat, err := ioutil.ReadFile("/proc/sys/kernel/core_pattern")
		t.Errorf("read core_pattern: %q, %v", pat, err)
		apport, err := ioutil.ReadFile("/var/log/apport.log")
		t.Errorf("read apport log: %q, %v", apport, err)
		t.Fatalf("previous errors")
	}
	return p
}
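
// TestCore opens a core dump produced by the panic fixture and checks that
// the panicking goroutine, the panic message, and the current thread's
// registers can all be recovered from it.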
func TestCore(t *testing.T) {
	if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
		return
	}
	p := withCoreFile(t, "panic", "")

	gs, err := proc.GoroutinesInfo(p)
	if err != nil || len(gs) == 0 {
		t.Fatalf("GoroutinesInfo() = %v, %v; wanted at least one goroutine", gs, err)
	}

	var panicking *proc.G
	var panickingStack []proc.Stackframe
	for _, g := range gs {
		t.Logf("Goroutine %d", g.ID)
		stack, err := g.Stacktrace(10, false)
		if err != nil {
			t.Errorf("Stacktrace() on goroutine %v = %v", g, err)
		}
		for _, frame := range stack {
			fnname := ""
			if frame.Call.Fn != nil {
				fnname = frame.Call.Fn.Name
			}
			t.Logf("\tframe %s:%d in %s %#x (systemstack: %v)", frame.Call.File, frame.Call.Line, fnname, frame.Call.PC, frame.SystemStack)
			if frame.Current.Fn != nil && strings.Contains(frame.Current.Fn.Name, "panic") {
				panicking = g
				panickingStack = stack
			}
		}
	}
	if panicking == nil {
		t.Fatalf("Didn't find a call to panic in goroutine stacks: %v", gs)
	}

	var mainFrame *proc.Stackframe
	// Walk backward, because the current function seems to be main.main
	// in the actual call to panic().
	for i := len(panickingStack) - 1; i >= 0; i-- {
		if panickingStack[i].Current.Fn != nil && panickingStack[i].Current.Fn.Name == "main.main" {
			mainFrame = &panickingStack[i]
		}
	}
	if mainFrame == nil {
		t.Fatalf("Couldn't find main in stack %v", panickingStack)
	}
	msg, err := proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, *mainFrame).EvalVariable("msg", proc.LoadConfig{MaxStringLen: 64})
	if err != nil {
		t.Fatalf("Couldn't EvalVariable(msg, ...): %v", err)
	}
	if constant.StringVal(msg.Value) != "BOOM!" {
		t.Errorf("main.msg = %q, want %q", msg.Value, "BOOM!")
	}

	regs, err := p.CurrentThread().Registers(true)
	if err != nil {
		t.Fatalf("Couldn't get current thread registers: %v", err)
	}
	regslice := regs.Slice()
	for _, reg := range regslice {
		t.Logf("%s = %s", reg.Name, reg.Value)
	}
}
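
// TestCoreFpRegisters checks that the x87 and SSE registers set by the
// fputest fixture can be read back from the core dump.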
func TestCoreFpRegisters(t *testing.T) {
	if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
		return
	}
	// in go1.10 the crash is executed on a different thread and registers are
	// no longer available in the core dump.
	if ver, _ := goversion.Parse(runtime.Version()); ver.Major < 0 || ver.AfterOrEqual(goversion.GoVersion{1, 10, -1, 0, 0, ""}) {
		t.Skip("not supported in go1.10 and later")
	}

	p := withCoreFile(t, "fputest/", "panic")

	gs, err := proc.GoroutinesInfo(p)
	if err != nil || len(gs) == 0 {
		t.Fatalf("GoroutinesInfo() = %v, %v; wanted at least one goroutine", gs, err)
	}

	var regs proc.Registers
	for _, thread := range p.ThreadList() {
		frames, err := proc.ThreadStacktrace(thread, 10)
		if err != nil {
			t.Errorf("ThreadStacktrace for %x = %v", thread.ThreadID(), err)
			continue
		}
		for i := range frames {
			if frames[i].Current.Fn == nil {
				continue
			}
			if frames[i].Current.Fn.Name == "main.main" {
				regs, err = thread.Registers(true)
				if err != nil {
					t.Fatalf("Could not get registers for thread %x, %v", thread.ThreadID(), err)
				}
				break
			}
		}
		if regs != nil {
			break
		}
	}
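
	// Register values the fputest fixture is expected to leave in the core
	// dump; each register only needs to start with the listed prefix.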
	regtests := []struct{ name, value string }{
		{"ST(0)", "0x3fffe666660000000000"},
		{"ST(1)", "0x3fffd9999a0000000000"},
		{"ST(2)", "0x3fffcccccd0000000000"},
		{"ST(3)", "0x3fffc000000000000000"},
		{"ST(4)", "0x3fffb333333333333000"},
		{"ST(5)", "0x3fffa666666666666800"},
		{"ST(6)", "0x3fff9999999999999800"},
		{"ST(7)", "0x3fff8cccccccccccd000"},
		// Unlike TestClientServer_FpRegisters in service/test/integration2_test
		// we can not test the value of XMM0, it probably has been reused by
		// something between the panic and the time we get the core dump.
		{"XMM9", "0x3ff66666666666663ff4cccccccccccd"},
		{"XMM10", "0x3fe666663fd9999a3fcccccd3fc00000"},
		{"XMM3", "0x3ff199999999999a3ff3333333333333"},
		{"XMM4", "0x3ff4cccccccccccd3ff6666666666666"},
		{"XMM5", "0x3fcccccd3fc000003fe666663fd9999a"},
		{"XMM6", "0x4004cccccccccccc4003333333333334"},
		{"XMM7", "0x40026666666666664002666666666666"},
		{"XMM8", "0x4059999a404ccccd4059999a404ccccd"},
	}

	for _, reg := range regs.Slice() {
		t.Logf("%s = %s", reg.Name, reg.Value)
	}

	for _, regtest := range regtests {
		found := false
		for _, reg := range regs.Slice() {
			if reg.Name == regtest.name {
				found = true
				if !strings.HasPrefix(reg.Value, regtest.value) {
					t.Fatalf("register %s expected %q got %q", reg.Name, regtest.value, reg.Value)
				}
			}
		}
		if !found {
			t.Fatalf("register %s not found: %v", regtest.name, regs)
		}
	}
}
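
// TestCoreWithEmptyString checks that string variables can be read from a
// core dump produced by the coreemptystring fixture.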
func TestCoreWithEmptyString(t *testing.T) {
	if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
		return
	}
	p := withCoreFile(t, "coreemptystring", "")

	gs, err := proc.GoroutinesInfo(p)
	assertNoError(err, t, "GoroutinesInfo")

	var mainFrame *proc.Stackframe
mainSearch:
	for _, g := range gs {
		stack, err := g.Stacktrace(10, false)
		assertNoError(err, t, "Stacktrace()")
		for _, frame := range stack {
			if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.main" {
				mainFrame = &frame
				break mainSearch
			}
		}
	}

	if mainFrame == nil {
		t.Fatal("could not find main.main frame")
	}

	scope := proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, *mainFrame)
	v1, err := scope.EvalVariable("t", proc.LoadConfig{true, 1, 64, 64, -1, 0})
	assertNoError(err, t, "EvalVariable(t)")
	assertNoError(v1.Unreadable, t, "unreadable variable 't'")
	t.Logf("t = %#v\n", v1)

	v2, err := scope.EvalVariable("s", proc.LoadConfig{true, 1, 64, 64, -1, 0})
	assertNoError(err, t, "EvalVariable(s)")
	assertNoError(v2.Unreadable, t, "unreadable variable 's'")
	t.Logf("s = %#v\n", v2)
}