delve/pkg/proc/native/threads_linux.go

package native

import (
	"fmt"

	sys "golang.org/x/sys/unix"

	"github.com/derekparker/delve/pkg/proc"
)

type WaitStatus sys.WaitStatus

// OSSpecificDetails holds Linux-specific
// process details.
type OSSpecificDetails struct {
	registers sys.PtraceRegs
}

// Halt stops this thread from executing.
func (thread *Thread) Halt() (err error) {
	if thread.Stopped() {
		return
	}
	err = thread.halt()
	return
}
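
// halt sends SIGSTOP to the thread via tgkill(2). It intentionally does
// not wait for the thread to stop: near process death that wait can hang
// forever, so the stop is reaped by the trapWait loop in
// setCurrentBreakpoints instead.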
func (t *Thread) halt() (err error) {
	err = sys.Tgkill(t.dbp.pid, t.ID, sys.SIGSTOP)
	if err != nil {
		err = fmt.Errorf("halt err %s on thread %d", err, t.ID)
		return
	}
	return
}
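
// stopped reports whether the thread is currently in a ptrace-stop,
// judging by its state character in procfs.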
func (t *Thread) stopped() bool {
	state := status(t.ID, t.dbp.os.comm)
	return state == StatusTraceStop || state == StatusTraceStopT
}
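
// resume resumes execution of the stopped thread.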
func (t *Thread) resume() error {
	return t.resumeWithSig(0)
}
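
// resumeWithSig resumes the thread with PTRACE_CONT, delivering sig to it
// in the process, and marks the thread as running.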
func (t *Thread) resumeWithSig(sig int) (err error) {
	t.running = true
	t.dbp.execPtraceFunc(func() { err = PtraceCont(t.ID, sig) })
	return
}
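
// singleStep steps the thread one instruction with PTRACE_SINGLESTEP and
// waits until the step is reported back as a SIGTRAP on this thread,
// retrying the step for any other stop. If the process exits in the
// meantime a proc.ProcessExitedError is returned.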
func (t *Thread) singleStep() (err error) {
	for {
		t.dbp.execPtraceFunc(func() { err = sys.PtraceSingleStep(t.ID) })
		if err != nil {
			return err
		}
		wpid, status, err := t.dbp.waitFast(t.ID)
		if err != nil {
			return err
		}
		if (status == nil || status.Exited()) && wpid == t.dbp.pid {
			t.dbp.postExit()
			rs := 0
			if status != nil {
				rs = status.ExitStatus()
			}
			return proc.ProcessExitedError{Pid: t.dbp.pid, Status: rs}
		}
		if wpid == t.ID && status.StopSignal() == sys.SIGTRAP {
			return nil
		}
	}
}
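
// Blocked reports whether the thread is stopped in one of the runtime
// functions a thread can block in (runtime.futex, runtime.usleep,
// runtime.clone) rather than in user code.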
func (t *Thread) Blocked() bool {
	regs, err := t.Registers(false)
	if err != nil {
		return false
	}
	pc := regs.PC()
	fn := t.BinInfo().PCToFunc(pc)
	if fn != nil && ((fn.Name == "runtime.futex") || (fn.Name == "runtime.usleep") || (fn.Name == "runtime.clone")) {
		return true
	}
	return false
}
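
// saveRegisters captures the thread's current register set into
// t.os.registers so restoreRegisters can reinstate it later.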
func (t *Thread) saveRegisters() (proc.Registers, error) {
	var err error
	t.dbp.execPtraceFunc(func() { err = sys.PtraceGetRegs(t.ID, &t.os.registers) })
	if err != nil {
		return nil, fmt.Errorf("could not save register contents: %v", err)
	}
	return &Regs{&t.os.registers, nil}, nil
}
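
// restoreRegisters writes the register set previously captured by
// saveRegisters back to the thread.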
func (t *Thread) restoreRegisters() (err error) {
	t.dbp.execPtraceFunc(func() { err = sys.PtraceSetRegs(t.ID, &t.os.registers) })
	return
}
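
// WriteMemory writes data to the inferior's memory at addr via
// PTRACE_POKEDATA and returns the number of bytes written.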
func (t *Thread) WriteMemory(addr uintptr, data []byte) (written int, err error) {
	if t.dbp.exited {
		return 0, proc.ProcessExitedError{Pid: t.dbp.pid}
	}
	if len(data) == 0 {
		return
	}
	t.dbp.execPtraceFunc(func() { written, err = sys.PtracePokeData(t.ID, addr, data) })
	return
}
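
// ReadMemory reads len(data) bytes from the inferior's memory at addr via
// PTRACE_PEEKDATA. A minimal (hypothetical) use, reading the word under
// the current program counter:
//
//	buf := make([]byte, 8)
//	_, err := t.ReadMemory(buf, uintptr(regs.PC()))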
func (t *Thread) ReadMemory(data []byte, addr uintptr) (n int, err error) {
	if t.dbp.exited {
		return 0, proc.ProcessExitedError{Pid: t.dbp.pid}
	}
	if len(data) == 0 {
		return
	}
	t.dbp.execPtraceFunc(func() { n, err = sys.PtracePeekData(t.ID, addr, data) })
	return
}