*: add ppc64le support (#2963)

* Add vendor/golang.org/x/arch/ppc64

* Add ppc64le support
This commit is contained in:
Álex Sáez 2023-07-07 18:30:38 +02:00 committed by GitHub
parent 9f3e146129
commit 71f1220717
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 7593 additions and 17 deletions

@ -13,11 +13,22 @@ Tests skipped by each supported backend:
* 4 not implemented
* linux/386/pie skipped = 1
* 1 broken
* linux/ppc64le skipped = 1
* 1 broken - cgo stacktraces
* linux/ppc64le/native skipped = 1
* 1 broken in linux ppc64le
* linux/ppc64le/native/pie skipped = 11
* 11 broken - pie mode
* pie skipped = 2
* 2 upstream issue - https://github.com/golang/go/issues/29322
* ppc64le skipped = 11
* 6 broken
* 1 broken - global variable symbolication
* 4 not implemented
* windows skipped = 4
* 1 broken
* 3 see https://github.com/go-delve/delve/issues/2768
* windows/arm64 skipped = 4
* windows/arm64 skipped = 5
* 3 broken
* 1 broken - cgo stacktraces
* 1 broken - step concurrent

@ -0,0 +1,7 @@
#include "textflag.h"
TEXT ·asmFunc(SB),0,$0-16
MOVD arg+0(FP), R5
MOVD (R5), R5
MOVD R5, ret+8(FP)
RET

@ -6,6 +6,8 @@
#define BREAKPOINT asm("int3;")
#elif __i386__
#define BREAKPOINT asm("int3;")
#elif __PPC64__
#define BREAKPOINT asm("tw 31,0,0;")
#elif __aarch64__
#ifdef WIN32
#define BREAKPOINT asm("brk 0xF000;")

@ -291,6 +291,9 @@ func tagFlags() string {
if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" {
tags = append(tags, "exp.winarm64")
}
if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
tags = append(tags, "exp.linuxppc64le")
}
if Tags != nil && len(*Tags) > 0 {
tags = append(tags, *Tags...)
}

@ -76,3 +76,11 @@ else
exit $x
fi
export GOARCH=ppc64le
go run _scripts/make.go --tags exp.linuxppc64le
x=$?
if [ "$version" = "gotip" ]; then
exit 0
else
exit $x
fi

@ -213,6 +213,9 @@ func getDlvBin(t *testing.T) string {
if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" {
tags = "-tags=exp.winarm64"
}
if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
tags = "-tags=exp.linuxppc64le"
}
return getDlvBinInternal(t, tags)
}
@ -371,6 +374,10 @@ func TestGeneratedDoc(t *testing.T) {
//TODO(qmuntal): investigate further when the Windows ARM64 backend is more stable.
t.Skip("skipping test on Windows in CI")
}
if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
//TODO(alexsaezm): finish CI integration
t.Skip("skipping test on Linux/PPC64LE in CI")
}
// Checks gen-cli-docs.go
var generatedBuf bytes.Buffer
commands := terminal.DebugCommands(nil)

115
pkg/dwarf/regnum/ppc64le.go Normal file

@ -0,0 +1,115 @@
package regnum
import "fmt"
// The mapping between hardware registers and DWARF registers is specified
// in the 64-Bit ELF V2 ABI Specification of the Power Architecture in section
// 2.4 DWARF Definition
// https://openpowerfoundation.org/specifications/64bitelfabi/
const (
	// General Purpose Registers: from R0 to R31
	PPC64LE_FIRST_GPR = 0
	PPC64LE_R0        = PPC64LE_FIRST_GPR
	PPC64LE_LAST_GPR  = 31
	// Floating point registers: from F0 to F31
	PPC64LE_FIRST_FPR = 32
	PPC64LE_F0        = PPC64LE_FIRST_FPR
	PPC64LE_LAST_FPR  = 63
	// Vector (Altivec/VMX) registers: from V0 to V31
	PPC64LE_FIRST_VMX = 64
	PPC64LE_V0        = PPC64LE_FIRST_VMX
	PPC64LE_LAST_VMX  = 95
	// Vector Scalar (VSX) registers: from VS0 to VS63
	// NOTE(review): VS0..VS63 occupy 96..159, so 160 is one past the last
	// VSX register; left unchanged because isVSX uses an inclusive bound —
	// TODO confirm against the ABI's DWARF register table.
	PPC64LE_FIRST_VSX = 96
	PPC64LE_VS0       = PPC64LE_FIRST_VSX
	PPC64LE_LAST_VSX  = 160
	// Condition Registers: from CR0 to CR7
	PPC64LE_CR0 = 0
	// Special registers
	PPC64LE_SP = 1  // Stack frame pointer: Gpr[1]
	PPC64LE_PC = 12 // The documentation refers to this as the CIA (Current Instruction Address)
	PPC64LE_LR = 65 // Link register
)

// PPC64LEToName returns the conventional name of the DWARF register num.
// The special registers SP, PC and LR take precedence over the numeric
// ranges they overlap with (1, 12 and 65 respectively).
func PPC64LEToName(num uint64) string {
	switch {
	case num == PPC64LE_SP:
		return "SP"
	case num == PPC64LE_PC:
		return "PC"
	case num == PPC64LE_LR:
		return "LR"
	case isGPR(num):
		return fmt.Sprintf("r%d", int(num-PPC64LE_FIRST_GPR))
	case isFPR(num):
		return fmt.Sprintf("f%d", int(num-PPC64LE_FIRST_FPR))
	case isVMX(num):
		return fmt.Sprintf("v%d", int(num-PPC64LE_FIRST_VMX))
	case isVSX(num):
		return fmt.Sprintf("vs%d", int(num-PPC64LE_FIRST_VSX))
	default:
		return fmt.Sprintf("unknown%d", num)
	}
}

// PPC64LEMaxRegNum is 172 registers in total, across 4 categories:
// General Purpose Registers or GPR (32 GPR + 9 special registers)
// Floating Point Registers or FPR (32 FPR + 1 special register)
// Altivec/VMX Registers or VMX (32 VMX + 2 special registers)
// VSX Registers or VSX (64 VSX)
// Documentation: https://lldb.llvm.org/cpp_reference/RegisterContextPOSIX__ppc64le_8cpp_source.html
func PPC64LEMaxRegNum() uint64 {
	return 172
}

// isGPR reports whether num is a general purpose register (R0..R31).
func isGPR(num uint64) bool {
	// Bug fix: the original used `<`, which excluded R31 and made
	// PPC64LEToName(31) return "unknown31" instead of "r31".
	return num <= PPC64LE_LAST_GPR
}

// isFPR reports whether num is a floating point register (F0..F31).
func isFPR(num uint64) bool {
	return num >= PPC64LE_FIRST_FPR && num <= PPC64LE_LAST_FPR
}

// isVMX reports whether num is an Altivec/VMX vector register (V0..V31).
func isVMX(num uint64) bool {
	return num >= PPC64LE_FIRST_VMX && num <= PPC64LE_LAST_VMX
}

// isVSX reports whether num is a VSX vector-scalar register (VS0..VS63).
func isVSX(num uint64) bool {
	return num >= PPC64LE_FIRST_VSX && num <= PPC64LE_LAST_VSX
}

// PPC64LENameToDwarf maps lowercase textual register names to their DWARF
// register numbers ("nip"/"sp"/"bp"/"link" are aliases for PC/SP/SP/LR).
var PPC64LENameToDwarf = func() map[string]int {
	r := make(map[string]int)
	r["nip"] = PPC64LE_PC
	r["sp"] = PPC64LE_SP
	r["bp"] = PPC64LE_SP
	r["link"] = PPC64LE_LR
	// General Purpose Registers: from R0 to R31
	for i := 0; i <= 31; i++ {
		r[fmt.Sprintf("r%d", i)] = PPC64LE_R0 + i
	}
	// Floating point registers: from F0 to F31
	for i := 0; i <= 31; i++ {
		r[fmt.Sprintf("f%d", i)] = PPC64LE_F0 + i
	}
	// Vector (Altivec/VMX) registers: from V0 to V31
	for i := 0; i <= 31; i++ {
		r[fmt.Sprintf("v%d", i)] = PPC64LE_V0 + i
	}
	// Vector Scalar (VSX) registers: from VS0 to VS63
	for i := 0; i <= 63; i++ {
		r[fmt.Sprintf("vs%d", i)] = PPC64LE_VS0 + i
	}
	// Condition Registers: from CR0 to CR7
	for i := 0; i <= 7; i++ {
		r[fmt.Sprintf("cr%d", i)] = PPC64LE_CR0 + i
	}
	return r
}()

@ -151,5 +151,6 @@ func nameToDwarfFunc(n2d map[string]int) func(string) (int, bool) {
const (
crosscall2SPOffsetBad = 0x8
crosscall2SPOffsetWindowsAMD64 = 0x118
crosscall2SPOffsetLinuxPPC64LE = 0x158
crosscall2SPOffset = 0x58
)

@ -129,6 +129,7 @@ var (
elf.EM_X86_64: true,
elf.EM_AARCH64: true,
elf.EM_386: true,
elf.EM_PPC64: true,
}
supportedWindowsArch = map[_PEMachine]bool{
@ -687,6 +688,8 @@ func NewBinaryInfo(goos, goarch string) *BinaryInfo {
r.Arch = AMD64Arch(goos)
case "arm64":
r.Arch = ARM64Arch(goos)
case "ppc64le":
r.Arch = PPC64LEArch(goos)
}
return r
}
@ -1648,6 +1651,9 @@ func (bi *BinaryInfo) setGStructOffsetElf(image *Image, exe *elf.File, wg *sync.
bi.gStructOffset = tlsg.Value + uint64(bi.Arch.PtrSize()*2) + ((tls.Vaddr - uint64(bi.Arch.PtrSize()*2)) & (tls.Align - 1))
case elf.EM_PPC64:
_ = getSymbol(image, bi.logger, exe, "runtime.tls_g")
default:
// we should never get here
panic("architecture not supported")

@ -136,6 +136,8 @@ func (t *Target) Dump(out elfwriter.WriteCloserSeeker, flags DumpFlags, state *D
fhdr.Machine = elf.EM_386
case "arm64":
fhdr.Machine = elf.EM_AARCH64
case "ppc64le":
fhdr.Machine = elf.EM_PPC64
default:
panic("not implemented")
}

@ -0,0 +1,174 @@
package linutil
import (
"fmt"
"github.com/go-delve/delve/pkg/proc"
)
// PPC64LERegisters implements the proc.Registers interface for the native/linux
// backend and core/linux backends, on PPC64LE.
type PPC64LERegisters struct {
	Regs     *PPC64LEPtraceRegs // general purpose registers, as returned by ptrace
	Fpregs   []proc.Register    //Formatted floating point registers
	Fpregset []byte             //holding all floating point register values
	// loadFpRegs lazily populates Fpregs/Fpregset; it is reset to nil after
	// the first load attempt (see Slice and Copy).
	loadFpRegs func(*PPC64LERegisters) error
}

// NewPPC64LERegisters returns a PPC64LERegisters wrapping regs; loadFpRegs
// will be invoked on demand the first time floating point registers are
// requested.
func NewPPC64LERegisters(regs *PPC64LEPtraceRegs, loadFpRegs func(*PPC64LERegisters) error) *PPC64LERegisters {
	return &PPC64LERegisters{Regs: regs, loadFpRegs: loadFpRegs}
}

// PPC64LEPtraceRegs is the struct used by the linux kernel to return the
// general purpose registers for PPC64LE CPUs.
// Copied from src/syscall/ztypes_linux_ppc64le.go#L518-L532
type PPC64LEPtraceRegs struct {
	Gpr       [32]uint64 // 32 general-purpose registers, each 64 bits wide
	Nip       uint64     // next instruction pointer (the PC, see PC())
	Msr       uint64
	Orig_gpr3 uint64
	Ctr       uint64
	Link      uint64 // Link register -- LLDB dwarf_lr_ppc64le = 65
	Xer       uint64 // Fixed point exception register -- LLDB dwarf_xer_ppc64le = 76
	Ccr       uint64
	Softe     uint64
	Trap      uint64
	Dar       uint64
	Dsisr     uint64
	Result    uint64
}
// PC returns the value of the NIP register
// Also called the IAR/Instruction Address Register or NIP/Next Instruction Pointer
func (r *PPC64LERegisters) PC() uint64 {
	return r.Regs.Nip
}

// SP returns the value of Stack frame pointer stored in Gpr[1].
func (r *PPC64LERegisters) SP() uint64 {
	return r.Regs.Gpr[1]
}

// LR The Link Register is a 64-bit register. It can be
// used to provide the branch target address for the
// Branch Conditional to Link Register instruction, and it
// holds the return address after Branch instructions for
// which LK=1 and after System Call Vectored instructions.
// Extracted from the 2.3.2 section of the PowerISA Book 3.1
func (r *PPC64LERegisters) LR() uint64 {
	return r.Regs.Link
}

// BP returns the frame base pointer; on PPC64LE this is the same as the
// stack pointer (Gpr[1]).
func (r *PPC64LERegisters) BP() uint64 {
	return r.Regs.Gpr[1]
}

// TLS returns the value of the thread pointer stored in Gpr[13]
func (r *PPC64LERegisters) TLS() uint64 {
	return r.Regs.Gpr[13]
}

// GAddr returns the address of the G variable, kept in Gpr[30]; the second
// return value reports that the address is available without reading memory.
func (r *PPC64LERegisters) GAddr() (uint64, bool) {
	return r.Regs.Gpr[30], true
}
// Slice returns the registers as a list of (name, value) pairs.
// When floatingPoint is true the floating point registers are loaded on
// demand (via loadFpRegs, which is then cleared) and appended after the
// integer registers; a load failure is returned alongside whatever integer
// registers were collected.
func (r *PPC64LERegisters) Slice(floatingPoint bool) ([]proc.Register, error) {
	out := make([]proc.Register, 0, len(r.Regs.Gpr)+12+len(r.Fpregs))
	// General purpose registers R0..R31.
	for i, v := range r.Regs.Gpr {
		out = proc.AppendUint64Register(out, fmt.Sprintf("R%d", i), v)
	}
	// Special purpose registers, in the kernel's PPC64LEPtraceRegs order.
	// "Msr" fixes the original's inconsistent "MSr" capitalization (every
	// other name here capitalizes only the first letter).
	specials := []struct {
		k string
		v uint64
	}{
		{"Nip", r.Regs.Nip},
		{"Msr", r.Regs.Msr},
		{"Orig_gpr3", r.Regs.Orig_gpr3},
		{"Ctr", r.Regs.Ctr},
		{"Link", r.Regs.Link},
		{"Xer", r.Regs.Xer},
		{"Ccr", r.Regs.Ccr},
		{"Softe", r.Regs.Softe},
		{"Trap", r.Regs.Trap},
		{"Dar", r.Regs.Dar},
		{"Dsisr", r.Regs.Dsisr},
		{"Result", r.Regs.Result},
	}
	for _, reg := range specials {
		out = proc.AppendUint64Register(out, reg.k, reg.v)
	}
	var floatLoadError error
	if floatingPoint {
		if r.loadFpRegs != nil {
			floatLoadError = r.loadFpRegs(r)
			r.loadFpRegs = nil
		}
		out = append(out, r.Fpregs...)
	}
	return out, floatLoadError
}
// Copy returns a deep copy of these registers that is guaranteed not to
// change even if the thread they were read from resumes execution. Floating
// point registers are loaded (once) before copying so the copy is complete.
func (r *PPC64LERegisters) Copy() (proc.Registers, error) {
	if r.loadFpRegs != nil {
		loadErr := r.loadFpRegs(r)
		r.loadFpRegs = nil
		if loadErr != nil {
			return nil, loadErr
		}
	}
	clone := &PPC64LERegisters{Regs: &PPC64LEPtraceRegs{}}
	*clone.Regs = *r.Regs
	if r.Fpregs != nil {
		clone.Fpregs = append([]proc.Register(nil), r.Fpregs...)
	}
	if r.Fpregset != nil {
		clone.Fpregset = append([]byte(nil), r.Fpregset...)
	}
	return clone, nil
}
// PPC64LEPtraceFpRegs holds the raw floating point register area returned by
// PTRACE_GETREGSET with NT_FPREGSET (see ptraceGetFpRegset).
type PPC64LEPtraceFpRegs struct {
	Fp []byte
}

// Decode splits the raw register area into 16-byte chunks, each formatted as
// a register named "V%d".
// NOTE(review): FPRs are 8 bytes each; the 16-byte stride treats the area as
// vector-sized slots — confirm against the layout produced by
// ptraceGetFpRegset. Also assumes len(Fp) is a multiple of 16 (the slice
// expression panics otherwise) — TODO confirm.
func (fpregs *PPC64LEPtraceFpRegs) Decode() (regs []proc.Register) {
	for i := 0; i < len(fpregs.Fp); i += 16 {
		regs = proc.AppendBytesRegister(regs, fmt.Sprintf("V%d", i/16), fpregs.Fp[i:i+16])
	}
	return
}

@ -1,5 +1,5 @@
//go:build (linux && 386) || (darwin && arm64) || (windows && arm64)
// +build linux,386 darwin,arm64 windows,arm64
//go:build (linux && 386) || (darwin && arm64) || (windows && arm64) || (linux && ppc64le)
// +build linux,386 darwin,arm64 windows,arm64 linux,ppc64le
package native

@ -321,7 +321,11 @@ func (dbp *nativeProcess) initialize(path string, debugInfoDirs []string) (*proc
// look like the breakpoint was hit twice when it was "logically" only
// executed once.
// See: https://go-review.googlesource.com/c/go/+/208126
DisableAsyncPreempt: runtime.GOOS == "windows" || (runtime.GOOS == "linux" && runtime.GOARCH == "arm64"),
// - on linux/ppc64le, according to @laboger, there were issues in the past
// with gdb once AsyncPreempt was enabled. While implementing the port, a
// few tests failed while it was enabled, but it cannot be guaranteed that
// disabling it fixed those issues.
DisableAsyncPreempt: runtime.GOOS == "windows" || (runtime.GOOS == "linux" && runtime.GOARCH == "arm64") || (runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le"),
StopReason: stopReason,
CanDump: runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || (runtime.GOOS == "windows" && runtime.GOARCH == "amd64"),
@ -331,7 +335,7 @@ func (dbp *nativeProcess) initialize(path string, debugInfoDirs []string) (*proc
if err != nil {
return nil, err
}
if dbp.bi.Arch.Name == "arm64" {
if dbp.bi.Arch.Name == "arm64" || dbp.bi.Arch.Name == "ppc64le" {
dbp.iscgo = tgt.IsCgo()
}
return grp, nil

@ -1,5 +1,5 @@
//go:build (linux && amd64) || (linux && arm64)
// +build linux,amd64 linux,arm64
//go:build (linux && amd64) || (linux && arm64) || (linux && ppc64le)
// +build linux,amd64 linux,arm64 linux,ppc64le
package native

@ -0,0 +1,104 @@
package native
import (
"debug/elf"
"syscall"
"unsafe"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/regnum"
"github.com/go-delve/delve/pkg/proc"
"github.com/go-delve/delve/pkg/proc/linutil"
sys "golang.org/x/sys/unix"
)
const (
	// Sizes (in bytes) of the ptrace register areas.
	_PPC64LE_GPREGS_SIZE = 44 * 8 // 44 64-bit fields, matching linutil.PPC64LEPtraceRegs (32 GPRs + 12 special registers)
	_PPC64LE_FPREGS_SIZE = 33*8 + 8 // presumably 32 FPRs + FPSCR plus 8 trailing bytes (trimmed in ptraceGetFpRegset) — TODO confirm layout
)
// ptraceGetGRegs reads the general purpose registers of pid into regs using
// PTRACE_GETREGS.
func ptraceGetGRegs(pid int, regs *linutil.PPC64LEPtraceRegs) (err error) {
	// Bug fix: the original discarded the return value of PtraceGetRegs,
	// leaving err always nil and silently ignoring ptrace failures.
	err = sys.PtraceGetRegs(pid, (*sys.PtraceRegs)(regs))
	if err == syscall.Errno(0) {
		err = nil
	}
	return
}
// ptraceSetGRegs writes regs back as the general purpose registers of pid
// using PTRACE_SETREGS.
func ptraceSetGRegs(pid int, regs *linutil.PPC64LEPtraceRegs) (err error) {
	// Bug fix: the original discarded the return value of PtraceSetRegs,
	// leaving err always nil and silently ignoring ptrace failures.
	err = sys.PtraceSetRegs(pid, (*sys.PtraceRegs)(regs))
	if err == syscall.Errno(0) {
		err = nil
	}
	return
}
// ptraceGetFpRegset returns the floating point register area of thread tid,
// fetched with PTRACE_GETREGSET/NT_FPREGSET. If the target reports ENODEV
// (no floating point regset available) a nil slice and nil error are
// returned so callers can treat it as "no FP registers".
func ptraceGetFpRegset(tid int) (fpregset []byte, err error) {
	var ppc64leFpregs [_PPC64LE_FPREGS_SIZE]byte
	iov := sys.Iovec{Base: &ppc64leFpregs[0], Len: _PPC64LE_FPREGS_SIZE}
	_, _, err = syscall.Syscall6(syscall.SYS_PTRACE, sys.PTRACE_GETREGSET, uintptr(tid), uintptr(elf.NT_FPREGSET), uintptr(unsafe.Pointer(&iov)), 0, 0)
	if err != syscall.Errno(0) {
		if err == syscall.ENODEV {
			err = nil
		}
		return
	} else {
		// Syscall6 returns Errno(0) on success; normalize to nil.
		err = nil
	}
	// The kernel updates iov.Len with the number of bytes written; the last
	// 8 bytes are dropped — presumably not a floating point register, TODO
	// confirm against the kernel's NT_FPREGSET layout.
	fpregset = ppc64leFpregs[:iov.Len-8]
	return fpregset, err
}
// SetPC sets PC to the value specified by 'pc'.
// It reads the thread's current register set, updates NIP and writes the
// whole set back via ptrace.
func (t *nativeThread) setPC(pc uint64) error {
	ir, err := registers(t)
	if err != nil {
		return err
	}
	r := ir.(*linutil.PPC64LERegisters)
	r.Regs.Nip = pc
	// ptrace calls must run on the dedicated ptrace thread.
	t.dbp.execPtraceFunc(func() { err = ptraceSetGRegs(t.ID, r.Regs) })
	return err
}
// SetReg changes the value of the specified register.
// Only the PC (NIP), SP (Gpr[1]) and LR registers are supported; any other
// DWARF register number panics. The modified register set is written back
// to the thread with ptrace.
func (t *nativeThread) SetReg(regNum uint64, reg *op.DwarfRegister) error {
	ir, err := registers(t)
	if err != nil {
		return err
	}
	r := ir.(*linutil.PPC64LERegisters)
	switch regNum {
	case regnum.PPC64LE_PC:
		r.Regs.Nip = reg.Uint64Val
	case regnum.PPC64LE_SP:
		r.Regs.Gpr[1] = reg.Uint64Val
	case regnum.PPC64LE_LR:
		r.Regs.Link = reg.Uint64Val
	default:
		// NOTE(review): consider returning an error instead of panicking,
		// since this is reachable from user-driven register writes.
		panic("SetReg")
	}
	t.dbp.execPtraceFunc(func() { err = ptraceSetGRegs(t.ID, r.Regs) })
	return err
}
// registers fetches the current general purpose registers of thread and
// wraps them in a PPC64LERegisters. Floating point registers are loaded
// lazily: fpRegisters is only invoked the first time a caller asks for them.
func registers(thread *nativeThread) (proc.Registers, error) {
	var (
		regs linutil.PPC64LEPtraceRegs
		err  error
	)
	thread.dbp.execPtraceFunc(func() { err = ptraceGetGRegs(thread.ID, &regs) })
	if err != nil {
		return nil, err
	}
	r := linutil.NewPPC64LERegisters(&regs, func(r *linutil.PPC64LERegisters) error {
		var floatLoadError error
		r.Fpregs, r.Fpregset, floatLoadError = thread.fpRegisters()
		return floatLoadError
	})
	return r, nil
}

@ -1,6 +1,10 @@
// This file is used to detect build on unsupported GOOS/GOARCH combinations.
//go:build linux && !amd64 && !arm64 && !386
// +build linux,!amd64,!arm64,!386
//go:build linux && !amd64 && !arm64 && !386 && !(ppc64le && exp.linuxppc64le)
// +build linux
// +build !amd64
// +build !arm64
// +build !386
// +build !ppc64le !exp.linuxppc64le
package your_linux_architecture_is_not_supported_by_delve

@ -0,0 +1,25 @@
package native
import (
"fmt"
"github.com/go-delve/delve/pkg/proc"
"github.com/go-delve/delve/pkg/proc/linutil"
)
func (t *nativeThread) fpRegisters() ([]proc.Register, []byte, error) {
var regs []proc.Register
var fpregs linutil.PPC64LEPtraceFpRegs
var err error
t.dbp.execPtraceFunc(func() { fpregs.Fp, err = ptraceGetFpRegset(t.ID) })
regs = fpregs.Decode()
if err != nil {
err = fmt.Errorf("could not get floating point registers: %v", err.Error())
}
return regs, fpregs.Fp, err
}
func (t *nativeThread) restoreRegisters(savedRegs proc.Registers) error {
panic("Unimplemented restoreRegisters method in threads_linux_ppc64le.go")
}

234
pkg/proc/ppc64le_arch.go Normal file

@ -0,0 +1,234 @@
package proc
import (
"encoding/binary"
"fmt"
"strings"
"github.com/go-delve/delve/pkg/dwarf/frame"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/regnum"
)
// This is the unconditional trap, the same mnemonic that both clang and gcc use
// It's documented in Section C.6 Trap Mnemonics in the Power ISA Book 3
var ppc64leBreakInstruction = []byte{0x08, 0x00, 0xe0, 0x7f}

// PPC64LEArch returns an initialized Arch descriptor for linux/ppc64le,
// wiring the architecture-specific callbacks defined in this file.
func PPC64LEArch(goos string) *Arch {
	return &Arch{
		Name:                             "ppc64le",
		ptrSize:                          8,
		maxInstructionLength:             4, // every Power ISA instruction is 4 bytes
		breakpointInstruction:            ppc64leBreakInstruction,
		breakInstrMovesPC:                false,
		derefTLS:                         false, // Chapter 3.7 of the ELF V2 ABI Specification
		prologues:                        prologuesPPC64LE,
		fixFrameUnwindContext:            ppc64leFixFrameUnwindContext,
		switchStack:                      ppc64leSwitchStack,
		regSize:                          ppc64leRegSize,
		RegistersToDwarfRegisters:        ppc64leRegistersToDwarfRegisters,
		addrAndStackRegsToDwarfRegisters: ppc64leAddrAndStackRegsToDwarfRegisters,
		DwarfRegisterToString:            ppc64leDwarfRegisterToString,
		inhibitStepInto:                  func(*BinaryInfo, uint64) bool { return false },
		asmDecode:                        ppc64leAsmDecode,
		usesLR:                           true, // return addresses live in the link register
		PCRegNum:                         regnum.PPC64LE_PC,
		SPRegNum:                         regnum.PPC64LE_SP,
		LRRegNum:                         regnum.PPC64LE_LR,
		asmRegisters:                     ppc64leAsmRegisters,
		RegisterNameToDwarf:              nameToDwarfFunc(regnum.PPC64LENameToDwarf),
	}
}
// ppc64leFixFrameUnwindContext amends the DWARF frame context fctxt for pc:
// it synthesizes a frame context when fctxt is nil or pc is inside
// runtime.sigreturn, adjusts the CFA offset for frames inside crosscall2,
// and installs a default rule to recover the link register.
func ppc64leFixFrameUnwindContext(fctxt *frame.FrameContext, pc uint64, bi *BinaryInfo) *frame.FrameContext {
	a := bi.Arch
	if a.sigreturnfn == nil {
		a.sigreturnfn = bi.lookupOneFunc("runtime.sigreturn")
	}
	if fctxt == nil || (a.sigreturnfn != nil && pc >= a.sigreturnfn.Entry && pc < a.sigreturnfn.End) {
		// Synthetic frame: PC and LR are read from the stack at fixed
		// offsets below the CFA, SP is the CFA itself.
		return &frame.FrameContext{
			RetAddrReg: regnum.PPC64LE_LR,
			Regs: map[uint64]frame.DWRule{
				regnum.PPC64LE_PC: {
					Rule:   frame.RuleOffset,
					Offset: int64(-a.PtrSize()),
				},
				regnum.PPC64LE_LR: {
					Rule:   frame.RuleOffset,
					Offset: int64(-2 * a.PtrSize()),
				},
				regnum.PPC64LE_SP: {
					Rule:   frame.RuleValOffset,
					Offset: 0,
				},
			},
			CFA: frame.DWRule{
				Rule:   frame.RuleCFA,
				Reg:    regnum.PPC64LE_SP,
				Offset: int64(2 * a.PtrSize()),
			},
		}
	}
	if a.crosscall2fn == nil {
		// This is used to fix issues with the c calling frames
		a.crosscall2fn = bi.lookupOneFunc("crosscall2")
	}
	// Checks if we marked the function as a crosscall and if we are currently in it
	if a.crosscall2fn != nil && pc >= a.crosscall2fn.Entry && pc < a.crosscall2fn.End {
		rule := fctxt.CFA
		if rule.Offset == crosscall2SPOffsetBad {
			// Linux support only
			rule.Offset += crosscall2SPOffsetLinuxPPC64LE
		}
		fctxt.CFA = rule
	}
	// Fall back to treating LR as the frame pointer register when no DWARF
	// rule describes how to recover it.
	if fctxt.Regs[regnum.PPC64LE_LR].Rule == frame.RuleUndefined {
		fctxt.Regs[regnum.PPC64LE_LR] = frame.DWRule{
			Rule:   frame.RuleFramePointer,
			Reg:    regnum.PPC64LE_LR,
			Offset: 0,
		}
	}
	return fctxt
}
// Save-slot offsets (bytes, relative to SP) used by the cgo stack-switch
// handling below — presumably matching the runtime's cgocall stack layout;
// see the TODO inside ppc64leSwitchStack.
const ppc64cgocallSPOffsetSaveSlot = 32
const ppc64prevG0schedSPOffsetSaveSlot = 40

// ppc64leSwitchStack detects stack switches (goroutine stack <-> system
// stack) around cgo calls and runtime internals during unwinding, updating
// the iterator/register state. It returns true when it fully handled the
// current frame and normal unwinding should be skipped.
func ppc64leSwitchStack(it *stackIterator, callFrameRegs *op.DwarfRegisters) bool {
	if it.frame.Current.Fn == nil && it.systemstack && it.g != nil && it.top {
		// Unknown function at the top of the system stack: jump straight to
		// the goroutine stack.
		it.switchToGoroutineStack()
		return true
	}
	if it.frame.Current.Fn != nil {
		switch it.frame.Current.Fn.Name {
		case "runtime.asmcgocall", "runtime.cgocallback_gofunc", "runtime.sigpanic", "runtime.cgocallback":
			//do nothing
		case "runtime.goexit", "runtime.rt0_go", "runtime.mcall":
			// Look for "top of stack" functions.
			it.atend = true
			return true
		case "crosscall2":
			//The offsets get from runtime/cgo/asm_ppc64x.s:10
			newsp, _ := readUintRaw(it.mem, it.regs.SP()+8*24, int64(it.bi.Arch.PtrSize()))
			newbp, _ := readUintRaw(it.mem, it.regs.SP()+8*14, int64(it.bi.Arch.PtrSize()))
			newlr, _ := readUintRaw(it.mem, it.regs.SP()+16, int64(it.bi.Arch.PtrSize()))
			if it.regs.Reg(it.regs.BPRegNum) != nil {
				it.regs.Reg(it.regs.BPRegNum).Uint64Val = newbp
			} else {
				reg, _ := it.readRegisterAt(it.regs.BPRegNum, it.regs.SP()+8*14)
				it.regs.AddReg(it.regs.BPRegNum, reg)
			}
			it.regs.Reg(it.regs.LRRegNum).Uint64Val = newlr
			it.regs.Reg(it.regs.SPRegNum).Uint64Val = newsp
			it.pc = newlr
			return true
		default:
			if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
				// The runtime switches to the system stack in multiple places.
				// This usually happens through a call to runtime.systemstack but there
				// are functions that switch to the system stack manually (for example
				// runtime.morestack).
				// Since we are only interested in printing the system stack for cgo
				// calls we switch directly to the goroutine stack if we detect that the
				// function at the top of the stack is a runtime function.
				it.switchToGoroutineStack()
				return true
			}
		}
	}
	fn := it.bi.PCToFunc(it.frame.Ret)
	if fn == nil {
		return false
	}
	switch fn.Name {
	case "runtime.asmcgocall":
		if !it.systemstack {
			return false
		}
		// This function is called by a goroutine to execute a C function and
		// switches from the goroutine stack to the system stack.
		// Since we are unwinding the stack from callee to caller we have to switch
		// from the system stack to the goroutine stack.
		off, _ := readIntRaw(it.mem,
			callFrameRegs.SP()+ppc64cgocallSPOffsetSaveSlot,
			int64(it.bi.Arch.PtrSize()))
		oldsp := callFrameRegs.SP()
		newsp := uint64(int64(it.stackhi) - off)
		// runtime.asmcgocall can also be called from inside the system stack,
		// in that case no stack switch actually happens
		if newsp == oldsp {
			return false
		}
		it.systemstack = false
		callFrameRegs.Reg(callFrameRegs.SPRegNum).Uint64Val = uint64(int64(newsp))
		return false
	case "runtime.cgocallback_gofunc", "runtime.cgocallback":
		// For a detailed description of how this works read the long comment at
		// the start of $GOROOT/src/runtime/cgocall.go and the source code of
		// runtime.cgocallback_gofunc in $GOROOT/src/runtime/asm_ppc64.s
		//
		// When a C functions calls back into go it will eventually call into
		// runtime.cgocallback_gofunc which is the function that does the stack
		// switch from the system stack back into the goroutine stack
		// Since we are going backwards on the stack here we see the transition
		// as goroutine stack -> system stack.
		if it.systemstack {
			return false
		}
		it.loadG0SchedSP()
		if it.g0_sched_sp <= 0 {
			return false
		}
		// entering the system stack
		callFrameRegs.Reg(callFrameRegs.SPRegNum).Uint64Val = it.g0_sched_sp
		// reads the previous value of g0.sched.sp that runtime.cgocallback_gofunc saved on the stack
		// TODO: is this save slot correct?
		it.g0_sched_sp, _ = readUintRaw(it.mem, callFrameRegs.SP()+ppc64prevG0schedSPOffsetSaveSlot, int64(it.bi.Arch.PtrSize()))
		it.systemstack = true
		return false
	}
	return false
}
// ppc64leRegSize returns the size (in bytes) of register regnum.
// Every PPC64LE register exposed here is 64 bits wide, so the answer does
// not depend on the register number.
func ppc64leRegSize(regnum uint64) int {
	const registerWidthBytes = 8 // each register is a 64-bit register
	return registerWidthBytes
}
// ppc64leRegistersToDwarfRegisters converts a proc.Registers into a DWARF
// register set (little endian), wiring up PC, SP and LR; SP doubles as the
// BP register since no separate frame pointer register is used here.
func ppc64leRegistersToDwarfRegisters(staticBase uint64, regs Registers) *op.DwarfRegisters {
	dregs := initDwarfRegistersFromSlice(int(regnum.PPC64LEMaxRegNum()), regs, regnum.PPC64LENameToDwarf)
	dr := op.NewDwarfRegisters(staticBase, dregs, binary.LittleEndian, regnum.PPC64LE_PC, regnum.PPC64LE_SP, regnum.PPC64LE_SP, regnum.PPC64LE_LR)
	dr.SetLoadMoreCallback(loadMoreDwarfRegistersFromSliceFunc(dr, regs, regnum.PPC64LENameToDwarf))
	return dr
}

// ppc64leAddrAndStackRegsToDwarfRegisters builds a minimal DWARF register
// set containing only PC, SP and LR. The bp argument is unused and the BP
// register number passed to NewDwarfRegisters is 0 — NOTE(review): confirm
// this is intentional (other arches typically wire bp here).
func ppc64leAddrAndStackRegsToDwarfRegisters(staticBase, pc, sp, bp, lr uint64) op.DwarfRegisters {
	dregs := make([]*op.DwarfRegister, regnum.PPC64LE_LR+1)
	dregs[regnum.PPC64LE_PC] = op.DwarfRegisterFromUint64(pc)
	dregs[regnum.PPC64LE_SP] = op.DwarfRegisterFromUint64(sp)
	dregs[regnum.PPC64LE_LR] = op.DwarfRegisterFromUint64(lr)
	return *op.NewDwarfRegisters(staticBase, dregs, binary.LittleEndian, regnum.PPC64LE_PC, regnum.PPC64LE_SP, 0, regnum.PPC64LE_LR)
}
// ppc64leDwarfRegisterToString returns the name of DWARF register i and a
// string representation of its value: 16+ bytes are formatted as a raw hex
// byte dump (floating point / vector contents), anything smaller as the
// 64-bit integer value.
func ppc64leDwarfRegisterToString(i int, reg *op.DwarfRegister) (name string, floatingPoint bool, repr string) {
	name = regnum.PPC64LEToName(uint64(i))
	if reg == nil {
		return name, false, ""
	}
	// len of a nil slice is 0, so this single check is equivalent to the
	// original `reg.Bytes == nil || (reg.Bytes != nil && len(reg.Bytes) < 16)`.
	if len(reg.Bytes) < 16 {
		return name, false, fmt.Sprintf("%#016x", reg.Uint64Val)
	}
	return name, true, fmt.Sprintf("%#x", reg.Bytes)
}

161
pkg/proc/ppc64le_disasm.go Normal file

@ -0,0 +1,161 @@
package proc
import (
"encoding/binary"
"github.com/go-delve/delve/pkg/dwarf/op"
"github.com/go-delve/delve/pkg/dwarf/regnum"
"golang.org/x/arch/ppc64/ppc64asm"
)
// Possible stacksplit prologues are inserted by stacksplit in
// $GOROOT/src/cmd/internal/obj/ppc64/obj9.go.
var prologuesPPC64LE []opcodeSeq

// init precomputes the opcode sequences that the prologue detector matches
// against the start of each function (getg load followed by one of the
// stacksplit variants).
func init() {
	// Note: these will be the gnu opcodes and not the Go opcodes. Verify the sequences are as expected.
	// NOTE(review): tinyStacksplit and smallStacksplit are identical, so
	// prologuesPPC64LE ends up containing duplicate sequences — harmless for
	// matching, but confirm whether a distinct tiny sequence was intended.
	var tinyStacksplit = opcodeSeq{uint64(ppc64asm.ADDI), uint64(ppc64asm.CMPLD), uint64(ppc64asm.BC)}
	var smallStacksplit = opcodeSeq{uint64(ppc64asm.ADDI), uint64(ppc64asm.CMPLD), uint64(ppc64asm.BC)}
	var bigStacksplit = opcodeSeq{uint64(ppc64asm.ADDI), uint64(ppc64asm.CMPLD), uint64(ppc64asm.BC), uint64(ppc64asm.STD), uint64(ppc64asm.STD), uint64(ppc64asm.MFSPR)}
	var unixGetG = opcodeSeq{uint64(ppc64asm.LD)}
	prologuesPPC64LE = make([]opcodeSeq, 0, 3)
	for _, getG := range []opcodeSeq{unixGetG} {
		for _, stacksplit := range []opcodeSeq{tinyStacksplit, smallStacksplit, bigStacksplit} {
			prologue := make(opcodeSeq, 0, len(getG)+len(stacksplit))
			prologue = append(prologue, getG...)
			prologue = append(prologue, stacksplit...)
			prologuesPPC64LE = append(prologuesPPC64LE, prologue)
		}
	}
}
// ppc64leAsmDecode decodes one 4-byte Power ISA instruction from mem into
// asmInst, classifies it (call/ret/jump/hard-breakpoint) and resolves the
// branch destination when possible.
func ppc64leAsmDecode(asmInst *AsmInstruction, mem []byte, regs *op.DwarfRegisters, memrw MemoryReadWriter, bi *BinaryInfo) error {
	asmInst.Size = 4 // every Power ISA instruction is exactly 4 bytes
	if len(mem) < asmInst.Size {
		// Bug fix: slicing mem[:4] on a shorter buffer would panic; let the
		// decoder produce the appropriate truncation error instead.
		asmInst.Inst = (*ppc64ArchInst)(nil)
		_, err := ppc64asm.Decode(mem, binary.LittleEndian)
		return err
	}
	asmInst.Bytes = mem[:asmInst.Size]
	inst, err := ppc64asm.Decode(mem, binary.LittleEndian)
	if err != nil {
		asmInst.Inst = (*ppc64ArchInst)(nil)
		return err
	}
	asmInst.Inst = (*ppc64ArchInst)(&inst)
	asmInst.Kind = OtherInstruction
	switch inst.Op {
	case ppc64asm.BL, ppc64asm.BLA, ppc64asm.BCL, ppc64asm.BCLA, ppc64asm.BCLRL, ppc64asm.BCCTRL, ppc64asm.BCTARL:
		// Pages 38-40 Book I v3.0
		asmInst.Kind = CallInstruction
	case ppc64asm.RFEBB, ppc64asm.RFID, ppc64asm.HRFID, ppc64asm.RFI, ppc64asm.RFCI, ppc64asm.RFDI, ppc64asm.RFMCI, ppc64asm.RFGI, ppc64asm.BCLR:
		asmInst.Kind = RetInstruction
	case ppc64asm.B, ppc64asm.BA, ppc64asm.BC, ppc64asm.BCA, ppc64asm.BCCTR, ppc64asm.BCTAR:
		// Pages 38-40 Book I v3.0
		asmInst.Kind = JmpInstruction
	case ppc64asm.TD, ppc64asm.TDI, ppc64asm.TW, ppc64asm.TWI:
		asmInst.Kind = HardBreakInstruction
	}
	asmInst.DestLoc = resolveCallArgPPC64LE(&inst, asmInst.Loc.PC, asmInst.AtPC, regs, memrw, bi)
	return nil
}
// resolveCallArgPPC64LE computes the destination of a branch instruction
// when it can be determined statically (immediate / PC-relative operands) or
// from the live register state (register-indirect and link-register
// branches). It returns nil when inst is not a branch or the target cannot
// be resolved.
func resolveCallArgPPC64LE(inst *ppc64asm.Inst, instAddr uint64, currentGoroutine bool, regs *op.DwarfRegisters, mem MemoryReadWriter, bininfo *BinaryInfo) *Location {
	switch inst.Op {
	case ppc64asm.BCLRL, ppc64asm.BCLR:
		// Branch to the link register: only resolvable when regs reflects
		// the machine state at instAddr.
		if regs != nil && regs.PC() == instAddr {
			pc := regs.Reg(bininfo.Arch.LRRegNum).Uint64Val
			file, line, fn := bininfo.PCToLine(pc)
			if fn == nil {
				return &Location{PC: pc}
			}
			return &Location{PC: pc, File: file, Line: line, Fn: fn}
		}
		return nil
	case ppc64asm.B, ppc64asm.BL, ppc64asm.BLA, ppc64asm.BCL, ppc64asm.BCLA, ppc64asm.BCCTRL, ppc64asm.BCTARL:
		// ok
	default:
		return nil
	}
	var pc uint64
	var err error
	switch arg := inst.Args[0].(type) {
	case ppc64asm.Imm:
		pc = uint64(arg)
	case ppc64asm.Reg:
		// Register-indirect branch: needs the live registers of the current
		// goroutine.
		if !currentGoroutine || regs == nil {
			return nil
		}
		pc, err = bininfo.Arch.getAsmRegister(regs, int(arg))
		if err != nil {
			return nil
		}
	case ppc64asm.PCRel:
		pc = instAddr + uint64(arg)
	default:
		return nil
	}
	file, line, fn := bininfo.PCToLine(pc)
	if fn == nil {
		return &Location{PC: pc}
	}
	return &Location{PC: pc, File: file, Line: line, Fn: fn}
}
// ppc64ArchInst adapts a decoded ppc64asm.Inst to delve's instruction
// interface.
type ppc64ArchInst ppc64asm.Inst

// Text renders the instruction in the requested assembly flavour; a nil
// instruction renders as "?".
func (inst *ppc64ArchInst) Text(flavour AssemblyFlavour, pc uint64, symLookup func(uint64) (string, uint64)) string {
	if inst == nil {
		return "?"
	}
	if flavour == GNUFlavour {
		return ppc64asm.GNUSyntax(ppc64asm.Inst(*inst), pc)
	}
	// Any other flavour falls back to Go assembler syntax.
	return ppc64asm.GoSyntax(ppc64asm.Inst(*inst), pc, symLookup)
}
// OpcodeEquals reports whether the decoded instruction's opcode is op; a nil
// instruction matches nothing.
func (inst *ppc64ArchInst) OpcodeEquals(op uint64) bool {
	return inst != nil && uint64(inst.Op) == op
}
// ppc64leAsmRegisters maps ppc64asm operand register numbers to delve's
// DWARF-based register numbers, used to resolve register-indirect branch
// targets. The two trailing zero fields of asmRegister are left at their
// zero values — NOTE(review): confirm against the asmRegister definition
// (other arches use them for sub-register offset/size).
var ppc64leAsmRegisters = func() map[int]asmRegister {
	r := make(map[int]asmRegister)
	// General Purpose Registers: from R0 to R31
	for i := ppc64asm.R0; i <= ppc64asm.R31; i++ {
		r[int(i)] = asmRegister{regnum.PPC64LE_R0 + uint64(i-ppc64asm.R0), 0, 0}
	}
	// Floating point registers: from F0 to F31
	for i := ppc64asm.F0; i <= ppc64asm.F31; i++ {
		r[int(i)] = asmRegister{regnum.PPC64LE_F0 + uint64(i-ppc64asm.F0), 0, 0}
	}
	// Vector (Altivec/VMX) registers: from V0 to V31
	for i := ppc64asm.V0; i <= ppc64asm.V31; i++ {
		r[int(i)] = asmRegister{regnum.PPC64LE_V0 + uint64(i-ppc64asm.V0), 0, 0}
	}
	// Vector Scalar (VSX) registers: from VS0 to VS63
	for i := ppc64asm.VS0; i <= ppc64asm.VS63; i++ {
		r[int(i)] = asmRegister{regnum.PPC64LE_VS0 + uint64(i-ppc64asm.VS0), 0, 0}
	}
	// Condition Registers: from CR0 to CR7
	for i := ppc64asm.CR0; i <= ppc64asm.CR7; i++ {
		r[int(i)] = asmRegister{regnum.PPC64LE_CR0 + uint64(i-ppc64asm.CR0), 0, 0}
	}
	return r
}()

@ -809,6 +809,7 @@ func TestFindReturnAddress(t *testing.T) {
}
func TestFindReturnAddressTopOfStackFn(t *testing.T) {
skipOn(t, "broken in linux ppc64le", "linux", "ppc64le", "native")
protest.AllowRecording(t)
withTestProcess("testreturnaddress", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
fnName := "runtime.rt0_go"
@ -903,6 +904,7 @@ func (l1 *loc) match(l2 proc.Stackframe) bool {
}
func TestStacktrace(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
stacks := [][]loc{
{{4, "main.stacktraceme"}, {8, "main.func1"}, {16, "main.main"}},
{{4, "main.stacktraceme"}, {8, "main.func1"}, {12, "main.func2"}, {17, "main.main"}},
@ -987,6 +989,7 @@ func stackMatch(stack []loc, locations []proc.Stackframe, skipRuntime bool) bool
func TestStacktraceGoroutine(t *testing.T) {
skipOn(t, "broken - cgo stacktraces", "darwin", "arm64")
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
mainStack := []loc{{14, "main.stacktraceme"}, {29, "main.main"}}
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) {
@ -1311,6 +1314,7 @@ func TestVariableEvaluation(t *testing.T) {
}
func TestFrameEvaluation(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
protest.AllowRecording(t)
lenient := false
if runtime.GOOS == "windows" {
@ -2303,6 +2307,7 @@ func TestNextDeferReturnAndDirectCall(t *testing.T) {
}
func TestNextPanicAndDirectCall(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Next should not step into a deferred function if it is called
// directly, only if it is called through a panic or a deferreturn.
// Here we test the case where the function is called by a panic
@ -2320,6 +2325,7 @@ func TestStepCall(t *testing.T) {
}
func TestStepCallPtr(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Tests that Step works correctly when calling functions with a
// function pointer.
if goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) && !protest.RegabiSupported() {
@ -2339,6 +2345,7 @@ func TestStepCallPtr(t *testing.T) {
}
func TestStepReturnAndPanic(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Tests that Step works correctly when returning from functions
// and when a deferred function is called when panic'ing.
testseq("defercall", contStep, []nextTest{
@ -2350,6 +2357,7 @@ func TestStepReturnAndPanic(t *testing.T) {
}
func TestStepDeferReturn(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Tests that Step works correctly when a deferred function is
// called during a return.
testseq("defercall", contStep, []nextTest{
@ -2364,6 +2372,7 @@ func TestStepDeferReturn(t *testing.T) {
}
func TestStepIgnorePrivateRuntime(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Tests that Step will ignore calls to private runtime functions
// (such as runtime.convT2E in this case)
switch {
@ -2742,6 +2751,7 @@ func TestIssue594(t *testing.T) {
}
func TestStepOutPanicAndDirectCall(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// StepOut should not step into a deferred function if it is called
// directly, only if it is called through a panic.
// Here we test the case where the function is called by a panic
@ -3170,6 +3180,7 @@ func TestDebugStripped(t *testing.T) {
// TODO(derekparker): Add support for Mach-O and PE.
skipUnlessOn(t, "linux only", "linux")
skipOn(t, "not working on linux/386 with PIE", "linux", "386", "pie")
skipOn(t, "not working on linux/ppc64le when -gcflags=-N -l is passed", "linux", "ppc64le")
withTestProcessArgs("testnextprog", t, "", []string{}, protest.LinkStrip, func(p *proc.Target, grp *proc.TargetGroup, f protest.Fixture) {
setFunctionBreakpoint(p, t, "main.main")
assertNoError(grp.Continue(), t, "Continue")
@ -3308,6 +3319,8 @@ func TestCgoStacktrace(t *testing.T) {
}
}
skipOn(t, "broken - cgo stacktraces", "386")
skipOn(t, "broken - cgo stacktraces", "windows", "arm64")
skipOn(t, "broken - cgo stacktraces", "linux", "ppc64le")
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 21) {
skipOn(t, "broken - cgo stacktraces", "windows", "arm64")
}
@ -3437,6 +3450,7 @@ func TestCgoSources(t *testing.T) {
}
func TestSystemstackStacktrace(t *testing.T) {
skipOn(t, "broken", "ppc64le")
// check that we can follow a stack switch initiated by runtime.systemstack()
withTestProcess("panic", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "runtime.startpanic_m")
@ -3455,6 +3469,7 @@ func TestSystemstackStacktrace(t *testing.T) {
}
func TestSystemstackOnRuntimeNewstack(t *testing.T) {
skipOn(t, "broken", "ppc64le")
// The bug being tested here manifests as follows:
// - set a breakpoint somewhere or interrupt the program with Ctrl-C
// - try to look at stacktraces of other goroutines
@ -3692,7 +3707,8 @@ func TestHaltKeepsSteppingBreakpoints(t *testing.T) {
}
func TestDisassembleGlobalVars(t *testing.T) {
skipOn(t, "broken - global variable symbolication", "arm64") // On ARM64 symLookup can't look up variables due to how they are loaded, see issue #1778
skipOn(t, "broken - global variable symbolication", "arm64") // On ARM64 symLookup can't look up variables due to how they are loaded, see issue #1778
skipOn(t, "broken - global variable symbolication", "ppc64le") // See comment on ARM64 above.
// On 386 linux when pie, the generated code use __x86.get_pc_thunk to ensure position-independent.
// Locate global variable by
// `CALL __x86.get_pc_thunk.ax(SB) 0xb0f7f
@ -3878,6 +3894,7 @@ func TestInlinedStacktraceAndVariables(t *testing.T) {
}
func TestInlineStep(t *testing.T) {
skipOn(t, "broken", "ppc64le")
if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 10, Rev: -1}) {
// Versions of go before 1.10 do not have DWARF information for inlined calls
t.Skip("inlining not supported")
@ -4038,6 +4055,7 @@ func TestIssue951(t *testing.T) {
}
func TestDWZCompression(t *testing.T) {
skipOn(t, "broken", "ppc64le")
// If dwz is not available in the system, skip this test
if _, err := exec.LookPath("dwz"); err != nil {
t.Skip("dwz not installed")
@ -4610,6 +4628,7 @@ func TestCgoStacktrace2(t *testing.T) {
skipOn(t, "broken", "386")
}
skipOn(t, "broken - cgo stacktraces", "darwin", "arm64")
skipOn(t, "broken", "ppc64le")
protest.MustHaveCgo(t)
// If a panic happens during cgo execution the stacktrace should show the C
// function that caused the problem.
@ -4718,6 +4737,7 @@ func TestIssue1795(t *testing.T) {
if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 13) {
t.Skip("Test not relevant to Go < 1.13")
}
skipOn(t, "broken", "ppc64le")
withTestProcessArgs("issue1795", t, ".", []string{}, protest.EnableInlining|protest.EnableOptimization, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
assertNoError(grp.Continue(), t, "Continue()")
assertLineNumber(p, t, 12, "wrong line number after Continue,")
@ -5149,6 +5169,7 @@ func TestDump(t *testing.T) {
if (runtime.GOOS == "darwin" && testBackend == "native") || (runtime.GOOS == "windows" && runtime.GOARCH != "amd64") {
t.Skip("not supported")
}
skipOn(t, "not implemented", "ppc64le")
convertRegisters := func(arch *proc.Arch, dregs op.DwarfRegisters) string {
dregs.Reg(^uint64(0))
@ -5398,6 +5419,7 @@ func TestVariablesWithExternalLinking(t *testing.T) {
func TestWatchpointsBasic(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "not implemented", "ppc64le")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
protest.AllowRecording(t)
@ -5458,6 +5480,7 @@ func TestWatchpointCounts(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
skipOn(t, "not implemented", "ppc64le")
protest.AllowRecording(t)
withTestProcess("databpcountstest", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
@ -5545,6 +5568,7 @@ func TestManualStopWhileStopped(t *testing.T) {
}
func TestDwrapStartLocation(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Tests that the start location of a goroutine is unwrapped in Go 1.17 and later.
withTestProcess("goroutinestackprog", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
setFunctionBreakpoint(p, t, "main.stacktraceme")
@ -5572,6 +5596,7 @@ func TestDwrapStartLocation(t *testing.T) {
func TestWatchpointStack(t *testing.T) {
skipOn(t, "not implemented", "freebsd")
skipOn(t, "not implemented", "386")
skipOn(t, "not implemented", "ppc64le")
skipOn(t, "see https://github.com/go-delve/delve/issues/2768", "windows")
protest.AllowRecording(t)
@ -5768,13 +5793,15 @@ func TestNilPtrDerefInBreakInstr(t *testing.T) {
asmfile = "main_arm64.s"
case "386":
asmfile = "main_386.s"
case "ppc64le":
asmfile = "main_ppc64le.s"
default:
t.Fatalf("assembly file for %s not provided", runtime.GOARCH)
}
withTestProcess("asmnilptr/", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
f := filepath.Join(fixture.BuildDir, asmfile)
f = strings.ReplaceAll(f, "\\", "/")
f = strings.Replace(f, "\\", "/", -1)
setFileBreakpoint(p, t, f, 5)
t.Logf("first continue")
assertNoError(grp.Continue(), t, "Continue()")
@ -6042,6 +6069,7 @@ func TestEscapeCheckUnreadable(t *testing.T) {
}
func TestStepShadowConcurrentBreakpoint(t *testing.T) {
skipOn(t, "broken - pie mode", "linux", "ppc64le", "native", "pie")
// Checks that a StepBreakpoint can not shadow a concurrently hit user breakpoint
withTestProcess("stepshadow", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
break2 := setFunctionBreakpoint(p, t, "main.stacktraceme2")

@ -280,7 +280,7 @@ func (it *stackIterator) switchToGoroutineStack() {
it.pc = it.g.PC
it.regs.Reg(it.regs.SPRegNum).Uint64Val = it.g.SP
it.regs.AddReg(it.regs.BPRegNum, op.DwarfRegisterFromUint64(it.g.BP))
if it.bi.Arch.Name == "arm64" {
if it.bi.Arch.Name == "arm64" || it.bi.Arch.Name == "ppc64le" {
it.regs.Reg(it.regs.LRRegNum).Uint64Val = it.g.LR
}
}
@ -475,7 +475,7 @@ func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uin
// In the following line we copy GDB's behaviour by assuming this is
// implicit.
// See also the comment in dwarf2_frame_default_init in
// $GDB_SOURCE/dwarf2-frame.c
// $GDB_SOURCE/dwarf2/frame.c
callFrameRegs.AddReg(callFrameRegs.SPRegNum, cfareg)
for i, regRule := range framectx.Regs {
@ -504,7 +504,7 @@ func (it *stackIterator) advanceRegs() (callFrameRegs op.DwarfRegisters, ret uin
}
}
if it.bi.Arch.Name == "arm64" {
if it.bi.Arch.Name == "arm64" || it.bi.Arch.Name == "ppc64le" {
if ret == 0 && it.regs.Reg(it.regs.LRRegNum) != nil {
ret = it.regs.Reg(it.regs.LRRegNum).Uint64Val
}

@ -8,8 +8,11 @@ import (
"go/ast"
"go/token"
"path/filepath"
"runtime"
"strings"
"golang.org/x/arch/ppc64/ppc64asm"
"github.com/go-delve/delve/pkg/astutil"
"github.com/go-delve/delve/pkg/dwarf/reader"
)
@ -913,7 +916,16 @@ func setStepIntoBreakpoint(dbp *Target, curfn *Function, text []AsmInstruction,
return nil
}
pc := instr.DestLoc.PC
fn := instr.DestLoc.Fn
if runtime.GOARCH == "ppc64le" && instr.Inst.OpcodeEquals(uint64(ppc64asm.BCLRL)) {
regs, err := dbp.CurrentThread().Registers()
if err != nil {
return err
}
lr := regs.LR()
fn = dbp.BinInfo().PCToFunc(lr)
}
// Skip unexported runtime functions
if !stepIntoUnexportedRuntime && fn != nil && fn.privateRuntime() {
@ -924,8 +936,6 @@ func setStepIntoBreakpoint(dbp *Target, curfn *Function, text []AsmInstruction,
// or entire packages from being stepped into with 'step'
// those extra checks should be done here.
pc := instr.DestLoc.PC
// Skip InhibitStepInto functions for different arch.
if dbp.BinInfo().Arch.inhibitStepInto(dbp.BinInfo(), pc) {
return nil

@ -310,7 +310,7 @@ func MustSupportFunctionCalls(t *testing.T, testBackend string) {
if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" && runtime.GOARCH == "amd64" {
t.Skip("function call injection tests are failing on macOS on Travis-CI (see #1802)")
}
if runtime.GOARCH == "386" {
if runtime.GOARCH == "386" || runtime.GOARCH == "ppc64le" {
t.Skip(fmt.Errorf("%s does not support FunctionCall for now", runtime.GOARCH))
}
if runtime.GOARCH == "arm64" {

@ -1462,6 +1462,7 @@ func assertCurrentLocationFunction(p *proc.Target, t *testing.T, fnname string)
}
func TestPluginVariables(t *testing.T) {
skipOn(t, "broken", "ppc64le")
pluginFixtures := protest.WithPlugins(t, protest.AllNonOptimized, "plugin1/", "plugin2/")
withTestProcessArgs("plugintest2", t, ".", []string{pluginFixtures[0].Path, pluginFixtures[1].Path}, protest.AllNonOptimized, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
@ -1544,6 +1545,10 @@ func TestCgoEval(t *testing.T) {
t.Skip("cgo doesn't work on darwin/arm64")
}
if runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
protest.AllowRecording(t)
withTestProcess("testvariablescgo/", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) {
assertNoError(grp.Continue(), t, "Continue() returned an error")

@ -305,6 +305,9 @@ func TestExitStatus(t *testing.T) {
}
func TestScopePrefix(t *testing.T) {
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
const goroutinesLinePrefix = " Goroutine "
const goroutinesCurLinePrefix = "* Goroutine "
test.AllowRecording(t)
@ -873,6 +876,9 @@ func TestIssue1090(t *testing.T) {
}
func TestPrintContextParkedGoroutine(t *testing.T) {
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
withTestTerminal("goroutinestackprog", t, func(term *FakeTerminal) {
term.MustExec("break stacktraceme")
term.MustExec("continue")
@ -946,6 +952,9 @@ func TestOptimizationCheck(t *testing.T) {
}
func TestTruncateStacktrace(t *testing.T) {
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
const stacktraceTruncatedMessage = "(truncated)"
withTestTerminal("stacktraceprog", t, func(term *FakeTerminal) {
term.MustExec("break main.stacktraceme")
@ -966,6 +975,9 @@ func TestTruncateStacktrace(t *testing.T) {
func TestIssue1493(t *testing.T) {
// The 'regs' command without the '-a' option should only return
// general purpose registers.
if runtime.GOARCH == "ppc64le" {
t.Skip("skipping, some registers such as vector registers are currently not loaded")
}
withTestTerminal("continuetestprog", t, func(term *FakeTerminal) {
r := term.MustExec("regs")
nr := len(strings.Split(r, "\n"))
@ -1381,6 +1393,9 @@ func TestTranscript(t *testing.T) {
}
func TestDisassPosCmd(t *testing.T) {
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
withTestTerminal("testvariables2", t, func(term *FakeTerminal) {
term.MustExec("continue")
out := term.MustExec("step-instruction")

@ -2458,6 +2458,9 @@ func TestGlobalScopeAndVariables(t *testing.T) {
// got loaded. It then steps into a function in another package and tests that
// the registers were updated by checking PC.
func TestRegistersScopeAndVariables(t *testing.T) {
if runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
runTest(t, "consts", func(client *daptest.Client, fixture protest.Fixture) {
runDebugSessionWithBPs(t, client, "launch",
// Launch

@ -53,6 +53,9 @@ func TestDebugger_LaunchInvalidFormat(t *testing.T) {
if runtime.GOARCH == "arm64" && runtime.GOOS == "linux" {
os.Setenv("GOARCH", "amd64")
}
if runtime.GOARCH == "ppc64le" && runtime.GOOS == "linux" {
os.Setenv("GOARCH", "amd64")
}
os.Setenv("GOOS", switchOS[runtime.GOOS])
exepath := filepath.Join(buildtestdir, debugname)
if err := gobuild.GoBuild(debugname, []string{buildtestdir}, fmt.Sprintf("-o %s", exepath)); err != nil {

@ -36,6 +36,9 @@ func TestDebugger_LaunchNoExecutablePerm(t *testing.T) {
if runtime.GOARCH == "arm64" && runtime.GOOS == "linux" {
os.Setenv("GOARCH", "amd64")
}
if runtime.GOARCH == "ppc64le" && runtime.GOOS == "linux" {
os.Setenv("GOARCH", "amd64")
}
os.Setenv("GOOS", switchOS[runtime.GOOS])
exepath := filepath.Join(buildtestdir, debugname)
defer os.Remove(exepath)

@ -564,6 +564,9 @@ func Test1ClientServer_traceContinue2(t *testing.T) {
}
func Test1ClientServer_FindLocations(t *testing.T) {
if buildMode == "pie" && runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
withTestClient1("locationsprog", t, func(c *rpc1.RPCClient) {
someFunctionCallAddr := findLocationHelper(t, c, "locationsprog.go:26", false, 1, 0)[0]
someFunctionLine1 := findLocationHelper(t, c, "locationsprog.go:27", false, 1, 0)[0]
@ -719,6 +722,9 @@ func Test1ClientServer_FullStacktrace(t *testing.T) {
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
t.Skip("cgo doesn't work on darwin/arm64")
}
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
lenient := false
if runtime.GOOS == "windows" {
@ -858,6 +864,9 @@ func Test1Issue355(t *testing.T) {
}
func Test1Disasm(t *testing.T) {
if runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
// Tests that disassembling by PC, range, and current PC all yield similar results
// Tests that disassembly by current PC will return a disassembly containing the instruction at PC
// Tests that stepping on a calculated CALL instruction will yield a disassembly that contains the

@ -946,6 +946,9 @@ func TestClientServer_traceContinue2(t *testing.T) {
}
func TestClientServer_FindLocations(t *testing.T) {
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
withTestClient2("locationsprog", t, func(c service.Client) {
someFunctionCallAddr := findLocationHelper(t, c, "locationsprog.go:26", false, 1, 0)[0]
someFunctionLine1 := findLocationHelper(t, c, "locationsprog.go:27", false, 1, 0)[0]
@ -1210,6 +1213,9 @@ func TestClientServer_FullStacktrace(t *testing.T) {
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
t.Skip("cgo doesn't work on darwin/arm64")
}
if runtime.GOARCH == "ppc64le" && buildMode == "pie" {
t.Skip("pie mode broken on ppc64le")
}
lenient := false
if runtime.GOOS == "windows" {
@ -1362,6 +1368,9 @@ func TestIssue355(t *testing.T) {
}
func TestDisasm(t *testing.T) {
if runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
// Tests that disassembling by PC, range, and current PC all yield similar results
// Tests that disassembly by current PC will return a disassembly containing the instruction at PC
// Tests that stepping on a calculated CALL instruction will yield a disassembly that contains the
@ -2858,6 +2867,9 @@ func assertLine(t *testing.T, state *api.DebuggerState, file string, lineno int)
}
func TestPluginSuspendedBreakpoint(t *testing.T) {
if runtime.GOARCH == "ppc64le" {
t.Skip("skipped on ppc64le: broken")
}
// Tests that breakpoints created in a suspended state will be enabled automatically when a plugin is loaded.
pluginFixtures := protest.WithPlugins(t, protest.AllNonOptimized, "plugin1/", "plugin2/")
dir, err := filepath.Abs(protest.FindFixturesDir())

179
vendor/golang.org/x/arch/ppc64/ppc64asm/decode.go generated vendored Normal file

@ -0,0 +1,179 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64asm
import (
"encoding/binary"
"fmt"
"log"
)
const debugDecode = false
// instFormat is a decoding rule for one specific instruction form.
// A uint32 instruction ins matches the rule if ins&Mask == Value.
// DontCare bits should be zero, but the machine might not reject
// ones in those bits; they are mainly reserved for future expansion
// of the instruction set.
// The Args are stored in the same order as the instruction manual.
type instFormat struct {
	Op Op // mnemonic for this instruction form
	Mask uint32 // bits that must equal Value for this form to match
	Value uint32 // expected value of the masked bits
	DontCare uint32 // reserved bits, ignored on decode (see Decode)
	Args [5]*argField // per-operand decoders; nil-terminated when fewer than 5
}
// argField describes how to decode one argument of an instruction:
// the raw bits are extracted from the instruction word via the embedded
// BitFields, shifted left by Shift, and then interpreted according to
// Type (see argField.Parse).
type argField struct {
	Type ArgType // how the extracted value is interpreted
	Shift uint8 // left shift applied to the extracted value
	BitFields // where in the instruction word the bits live
}
// Parse decodes the argument described by a from the 32-bit instruction
// word ins and returns it as a typed Arg value. TypeUnknown (and any
// unrecognized type) yields nil.
func (a argField) Parse(ins uint32) Arg {
	switch a.Type {
	case TypeReg:
		return R0 + Reg(a.BitFields.Parse(ins))
	case TypeFPReg:
		return F0 + Reg(a.BitFields.Parse(ins))
	case TypeVecReg:
		return V0 + Reg(a.BitFields.Parse(ins))
	case TypeVecSReg:
		return VS0 + Reg(a.BitFields.Parse(ins))
	case TypeCondRegBit:
		return Cond0LT + CondReg(a.BitFields.Parse(ins))
	case TypeCondRegField:
		return CR0 + CondReg(a.BitFields.Parse(ins))
	case TypeSpReg:
		return SpReg(a.BitFields.Parse(ins))
	case TypeImmUnsigned:
		return Imm(a.BitFields.Parse(ins) << a.Shift)
	case TypeImmSigned:
		return Imm(a.BitFields.ParseSigned(ins) << a.Shift)
	case TypePCRel:
		return PCRel(a.BitFields.ParseSigned(ins) << a.Shift)
	case TypeLabel:
		return Label(a.BitFields.ParseSigned(ins) << a.Shift)
	case TypeOffset:
		return Offset(a.BitFields.ParseSigned(ins) << a.Shift)
	default: // TypeUnknown and anything unrecognized
		return nil
	}
}
// An ArgType classifies the kind of value an instruction argument
// carries (register, immediate, PC-relative target, ...).
type ArgType int8

const (
	TypeUnknown ArgType = iota
	TypePCRel // PC-relative address
	TypeLabel // absolute address
	TypeReg // integer register
	TypeCondRegBit // conditional register bit (0-31)
	TypeCondRegField // conditional register field (0-7)
	TypeFPReg // floating point register
	TypeVecReg // vector register
	TypeVecSReg // VSX register
	TypeSpReg // special register (depends on Op)
	TypeImmSigned // signed immediate
	TypeImmUnsigned // unsigned immediate/flag/mask, this is the catch-all type
	TypeOffset // signed offset in load/store
	TypeLast // must be the last one
)

// argTypeNames holds the printable name of each ArgType, indexed by its
// constant value. TypeLast intentionally has no name.
var argTypeNames = [...]string{
	TypeUnknown:      "Unknown",
	TypePCRel:        "PCRel",
	TypeLabel:        "Label",
	TypeReg:          "Reg",
	TypeCondRegBit:   "CondRegBit",
	TypeCondRegField: "CondRegField",
	TypeFPReg:        "FPReg",
	TypeVecReg:       "VecReg",
	TypeVecSReg:      "VecSReg",
	TypeSpReg:        "SpReg",
	TypeImmSigned:    "ImmSigned",
	TypeImmUnsigned:  "ImmUnsigned",
	TypeOffset:       "Offset",
}

// String returns the name of the argument type, or "ArgType(n)" for
// values without a defined name (including TypeLast).
func (t ArgType) String() string {
	if t >= TypeUnknown && t < TypeLast {
		return argTypeNames[t]
	}
	return fmt.Sprintf("ArgType(%d)", int(t))
}

// GoString is like String but prefixes defined names with "Type", so the
// result is valid Go syntax for the constant.
func (t ArgType) GoString() string {
	if t > 0 && t < TypeLast {
		return "Type" + t.String()
	}
	return t.String()
}
var (
	// Sentinel errors returned by Decode.
	errShort = fmt.Errorf("truncated instruction")
	errUnknown = fmt.Errorf("unknown instruction")
)

// decoderCover is lazily sized to instFormats on the first call to Decode.
// NOTE(review): it is allocated below but never written in this file, so
// the coverage tracking appears vestigial — confirm before relying on it.
// The lazy initialization is also not goroutine-safe; verify Decode is not
// called concurrently.
var decoderCover []bool

// Decode decodes the leading bytes in src as a single instruction using
// byte order ord. It returns errShort when fewer than 4 bytes are
// available and errUnknown when no instruction format matches a nonzero
// instruction word.
func Decode(src []byte, ord binary.ByteOrder) (inst Inst, err error) {
	if len(src) < 4 {
		return inst, errShort
	}
	if decoderCover == nil {
		decoderCover = make([]bool, len(instFormats))
	}
	inst.Len = 4 // only 4-byte instructions are supported
	ui := ord.Uint32(src[:inst.Len])
	inst.Enc = ui
	// Linear scan of the format table; the first matching entry wins.
	// (With debugDecode set the scan continues so every match is logged.)
	for i, iform := range instFormats {
		if ui&iform.Mask != iform.Value {
			continue
		}
		if ui&iform.DontCare != 0 {
			if debugDecode {
				log.Printf("Decode(%#x): unused bit is 1 for Op %s", ui, iform.Op)
			}
			// to match GNU objdump (libopcodes), we ignore don't care bits
		}
		// Decode each operand; a nil entry terminates the arg list.
		for i, argfield := range iform.Args {
			if argfield == nil {
				break
			}
			inst.Args[i] = argfield.Parse(ui)
		}
		inst.Op = iform.Op
		if debugDecode {
			log.Printf("%#x: search entry %d", ui, i)
			continue
		}
		break
	}
	// An all-zero word decodes to a zero Inst without error; any other
	// word that matched nothing is reported as unknown.
	if inst.Op == 0 && inst.Enc != 0 {
		return inst, errUnknown
	}
	return inst, nil
}

6
vendor/golang.org/x/arch/ppc64/ppc64asm/doc.go generated vendored Normal file

@ -0,0 +1,6 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ppc64asm implements decoding of 64-bit PowerPC machine code.
package ppc64asm

84
vendor/golang.org/x/arch/ppc64/ppc64asm/field.go generated vendored Normal file

@ -0,0 +1,84 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64asm
import (
"fmt"
"strings"
)
// A BitField is a bit-field in a 32-bit word.
// Bits are counted from 0 from the MSB to 31 as the LSB.
type BitField struct {
	Offs uint8 // the offset of the left-most bit.
	Bits uint8 // length in bits.
}

// String returns the field's position as "[offs:last]", "[offs]" for a
// single-bit field, or "[offs, len=0]" for a degenerate empty field.
func (b BitField) String() string {
	if b.Bits > 1 {
		return fmt.Sprintf("[%d:%d]", b.Offs, int(b.Offs+b.Bits)-1)
	} else if b.Bits == 1 {
		return fmt.Sprintf("[%d]", b.Offs)
	} else {
		return fmt.Sprintf("[%d, len=0]", b.Offs)
	}
}

// Parse extracts the bitfield b from i, and returns it as an unsigned integer.
// Parse will panic if b is invalid.
func (b BitField) Parse(i uint32) uint32 {
	// The b.Bits > 32 and b.Offs > 31 checks are not redundant with
	// b.Offs+b.Bits > 32: the sum is computed in uint8 and could wrap
	// around, so each component is validated separately first.
	if b.Bits > 32 || b.Bits == 0 || b.Offs > 31 || b.Offs+b.Bits > 32 {
		// Message typo fixed: was "invalid bitfiled".
		panic(fmt.Sprintf("invalid bitfield %v", b))
	}
	return (i >> (32 - b.Offs - b.Bits)) & ((1 << b.Bits) - 1)
}

// ParseSigned extracts the bitfield b from i, and returns it as a signed
// integer, sign-extended from its Bits-wide value.
// ParseSigned will panic if b is invalid.
func (b BitField) ParseSigned(i uint32) int32 {
	u := int32(b.Parse(i))
	return u << (32 - b.Bits) >> (32 - b.Bits)
}
// BitFields is a series of BitFields representing a single number.
type BitFields []BitField

// String returns the fields in "<[a:b]|[c:d]|...>" form.
func (bs BitFields) String() string {
	ss := make([]string, len(bs))
	for i, bf := range bs {
		ss[i] = bf.String()
	}
	return fmt.Sprintf("<%s>", strings.Join(ss, "|"))
}

// Append adds the bitfield b to the end of the sequence.
func (bs *BitFields) Append(b BitField) {
	*bs = append(*bs, b)
}

// parse extracts the bitfields from i, concatenates them and returns the
// result as an unsigned integer and the total length of all the bitfields.
// parse will panic if any bitfield in bs is invalid, but it doesn't check
// if the sequence of bitfields is reasonable.
func (bs BitFields) parse(i uint32) (u uint32, Bits uint8) {
	for _, b := range bs {
		u = (u << b.Bits) | b.Parse(i)
		Bits += b.Bits
	}
	return u, Bits
}

// Parse extracts the bitfields from i, concatenates them and returns the
// result as an unsigned integer. Parse will panic if any bitfield in bs is invalid.
func (bs BitFields) Parse(i uint32) uint32 {
	u, _ := bs.parse(i)
	return u
}

// ParseSigned extracts the bitfields from i, concatenates them and returns
// the result as a signed integer, sign-extended from the combined width.
// ParseSigned will panic if any bitfield in bs is invalid.
func (bs BitFields) ParseSigned(i uint32) int32 {
	u, l := bs.parse(i)
	return int32(u) << (32 - l) >> (32 - l)
}

267
vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go generated vendored Normal file

@ -0,0 +1,267 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64asm
import (
"bytes"
"fmt"
"strings"
)
var (
	// condBit and condBitNeg map a condition-register bit index (mod 4,
	// i.e. LT/GT/EQ/SO) to the branch mnemonic suffix used when the bit
	// is tested as true (condBit) or as false (condBitNeg). Note that
	// "so" has no negated spelling in this table.
	condBit = [4]string{"lt", "gt", "eq", "so"}
	condBitNeg = [4]string{"ge", "le", "ne", "so"}
)
// GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils.
// This form typically matches the syntax defined in the Power ISA Reference Manual.
func GNUSyntax(inst Inst, pc uint64) string {
	var buf bytes.Buffer
	// When there are all 0s, identify them as the disassembler
	// in binutils would.
	if inst.Enc == 0 {
		return ".long 0x0"
	} else if inst.Op == 0 {
		return "error: unknown instruction"
	}
	PC := pc
	// Special handling for some ops. startArg is the index of the first
	// operand left for the generic argument loop at the bottom; sep is
	// the separator written before that first operand.
	startArg := 0
	sep := " "
	switch inst.Op.String() {
	case "bc":
		// Conditional branch: fold the BO/BI operand pair into an
		// extended mnemonic (blt, bne, bdnz, bdnzt, ...) when possible.
		bo := gnuArg(&inst, 0, inst.Args[0], PC)
		bi := inst.Args[1]
		switch bi := bi.(type) {
		case CondReg:
			// NOTE(review): when bi names a whole CR field (bi >= CR0)
			// this writes "bc crN" but still falls through to the
			// bit-level switch below, which indexes condBit with a
			// field value — looks suspicious; confirm against binutils.
			if bi >= CR0 {
				if bi == CR0 && bo == "16" {
					buf.WriteString("bdnz")
				}
				buf.WriteString(fmt.Sprintf("bc cr%d", bi-CR0))
			}
			cr := bi / 4
			switch bo {
			case "4": // branch if the condition bit is false
				bit := condBitNeg[(bi-Cond0LT)%4]
				if cr == 0 {
					buf.WriteString(fmt.Sprintf("b%s", bit))
				} else {
					buf.WriteString(fmt.Sprintf("b%s cr%d,", bit, cr))
					sep = ""
				}
			case "12": // branch if the condition bit is true
				bit := condBit[(bi-Cond0LT)%4]
				if cr == 0 {
					buf.WriteString(fmt.Sprintf("b%s", bit))
				} else {
					buf.WriteString(fmt.Sprintf("b%s cr%d,", bit, cr))
					sep = ""
				}
			case "8": // decrement CTR, branch if CTR != 0 and bit true
				bit := condBit[(bi-Cond0LT)%4]
				sep = ""
				if cr == 0 {
					buf.WriteString(fmt.Sprintf("bdnzt %s,", bit))
				} else {
					buf.WriteString(fmt.Sprintf("bdnzt cr%d,%s,", cr, bit))
				}
			case "16": // decrement CTR, branch if CTR != 0
				if cr == 0 && bi == Cond0LT {
					buf.WriteString("bdnz")
				} else {
					buf.WriteString(fmt.Sprintf("bdnz cr%d,", cr))
					sep = ""
				}
			}
			startArg = 2
		default:
			// NOTE(review): debug print to stdout in library code —
			// consider removing or routing through a logger.
			fmt.Printf("Unexpected bi: %d for bc with bo: %s\n", bi, bo)
		}
		startArg = 2
	case "mtspr":
		// "mt" plus the named special register (mtxer/mtlr/mtctr), or
		// plain "mtspr" when the SPR number has no short form.
		opcode := inst.Op.String()
		buf.WriteString(opcode[0:2])
		switch spr := inst.Args[0].(type) {
		case SpReg:
			switch spr {
			case 1:
				buf.WriteString("xer")
				startArg = 1
			case 8:
				buf.WriteString("lr")
				startArg = 1
			case 9:
				buf.WriteString("ctr")
				startArg = 1
			default:
				buf.WriteString("spr")
			}
		default:
			buf.WriteString("spr")
		}
	case "mfspr":
		// "mf" plus the named special register, with the destination
		// register printed immediately after the mnemonic.
		opcode := inst.Op.String()
		buf.WriteString(opcode[0:2])
		arg := inst.Args[0]
		switch spr := inst.Args[1].(type) {
		case SpReg:
			switch spr {
			case 1:
				buf.WriteString("xer ")
				buf.WriteString(gnuArg(&inst, 0, arg, PC))
				startArg = 2
			case 8:
				buf.WriteString("lr ")
				buf.WriteString(gnuArg(&inst, 0, arg, PC))
				startArg = 2
			case 9:
				buf.WriteString("ctr ")
				buf.WriteString(gnuArg(&inst, 0, arg, PC))
				startArg = 2
			case 268:
				buf.WriteString("tb ")
				buf.WriteString(gnuArg(&inst, 0, arg, PC))
				startArg = 2
			default:
				buf.WriteString("spr")
			}
		default:
			buf.WriteString("spr")
		}
	default:
		buf.WriteString(inst.Op.String())
	}
	// Generic operand loop: print the remaining args, skipping any that
	// gnuArg suppresses (empty string) and any already consumed above.
	for i, arg := range inst.Args[:] {
		if arg == nil {
			break
		}
		if i < startArg {
			continue
		}
		text := gnuArg(&inst, i, arg, PC)
		if text == "" {
			continue
		}
		buf.WriteString(sep)
		sep = ","
		buf.WriteString(text)
	}
	return buf.String()
}
// gnuArg formats arg (which is the argIndex's arg in inst) according to GNU rules.
// NOTE: because GNUSyntax is the only caller of this func, and it receives a copy
// of inst, it's ok to modify inst.Args here (the Offset case below removes
// the register arg it consumes).
func gnuArg(inst *Inst, argIndex int, arg Arg, pc uint64) string {
	// special cases for load/store instructions: an Offset must always be
	// followed by its base register in the decode table.
	if _, ok := arg.(Offset); ok {
		if argIndex+1 == len(inst.Args) || inst.Args[argIndex+1] == nil {
			panic(fmt.Errorf("wrong table: offset not followed by register"))
		}
	}
	switch arg := arg.(type) {
	case Reg:
		// r0 as a load/store base register reads as the literal 0.
		if isLoadStoreOp(inst.Op) && argIndex == 1 && arg == R0 {
			return "0"
		}
		return arg.String()
	case CondReg:
		// The CondReg can either be found in a CMP, where the
		// condition register field is being set, or in an instruction
		// like a branch or isel that is testing a bit in a condition
		// register field.
		if arg == CR0 && strings.HasPrefix(inst.Op.String(), "cmp") {
			return "" // don't show cr0 for cmp instructions
		} else if arg >= CR0 {
			return fmt.Sprintf("cr%d", int(arg-CR0))
		}
		bit := condBit[(arg-Cond0LT)%4]
		if arg <= Cond0SO {
			return bit
		}
		return fmt.Sprintf("%s cr%d", bit, int(arg-Cond0LT)/4)
	case Imm:
		return fmt.Sprintf("%d", arg)
	case SpReg:
		// Well-known SPR numbers get their symbolic name.
		switch int(arg) {
		case 1:
			return "xer"
		case 8:
			return "lr"
		case 9:
			return "ctr"
		case 268:
			return "tb"
		default:
			return fmt.Sprintf("%d", int(arg))
		}
	case PCRel:
		// If the arg is 0, use the relative address format.
		// Otherwise the pc is meaningful, use absolute address.
		if int(arg) == 0 {
			return fmt.Sprintf(".%+#x", int(arg))
		}
		addr := pc + uint64(int64(arg))
		return fmt.Sprintf("%#x", addr)
	case Label:
		return fmt.Sprintf("%#x", uint32(arg))
	case Offset:
		// Consume the following base register and render "disp(reg)".
		reg := inst.Args[argIndex+1].(Reg)
		removeArg(inst, argIndex+1)
		if reg == R0 {
			return fmt.Sprintf("%d(0)", int(arg))
		}
		return fmt.Sprintf("%d(r%d)", int(arg), reg-R0)
	}
	return fmt.Sprintf("???(%v)", arg)
}
// removeArg removes the arg in inst.Args[index], shifting every later
// argument left by one and nil-filling the vacated slot at the end.
func removeArg(inst *Inst, index int) {
	for j := index; j < len(inst.Args); j++ {
		var next Arg
		if j+1 < len(inst.Args) {
			next = inst.Args[j+1]
		}
		inst.Args[j] = next
	}
}
// isLoadStoreOp returns true if op is a load or store instruction.
// The list covers the byte/half/word/doubleword/quadword forms, their
// update and indexed variants, and the byte-reversed forms.
func isLoadStoreOp(op Op) bool {
	switch op {
	case LBZ, LBZU, LBZX, LBZUX,
		LHZ, LHZU, LHZX, LHZUX,
		LHA, LHAU, LHAX, LHAUX,
		LWZ, LWZU, LWZX, LWZUX,
		LWA, LWAX, LWAUX,
		LD, LDU, LDX, LDUX,
		LQ,
		STB, STBU, STBX, STBUX,
		STH, STHU, STHX, STHUX,
		STW, STWU, STWX, STWUX,
		STD, STDU, STDX, STDUX,
		STQ,
		LHBRX, LWBRX, STHBRX, STWBRX:
		return true
	}
	return false
}

344
vendor/golang.org/x/arch/ppc64/ppc64asm/inst.go generated vendored Normal file

@ -0,0 +1,344 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64asm
import (
"bytes"
"fmt"
)
// An Inst is a single decoded PowerPC instruction.
type Inst struct {
	Op Op // Opcode mnemonic
	Enc uint32 // Raw encoding bits
	Len int // Length of encoding in bytes.
	Args Args // Instruction arguments, in Power ISA manual order.
}

// String renders the instruction as its mnemonic followed by a
// comma-separated operand list, stopping at the first nil argument.
func (i Inst) String() string {
	var out bytes.Buffer
	out.WriteString(i.Op.String())
	sep := " "
	for _, arg := range i.Args {
		if arg == nil {
			break
		}
		out.WriteString(sep)
		out.WriteString(arg.String())
		sep = ", "
	}
	return out.String()
}
// An Op is an instruction operation.
type Op uint16

// String returns the mnemonic for the opcode from the opstr table, or
// "Op(n)" for values without a table entry.
func (o Op) String() string {
	if int(o) >= len(opstr) || opstr[o] == "" {
		return fmt.Sprintf("Op(%d)", int(o))
	}
	return opstr[o]
}
// An Arg is a single instruction argument, one of these types: Reg, CondReg, SpReg, Imm, PCRel, Label, or Offset.
type Arg interface {
	IsArg()
	String() string
}

// An Args holds the instruction arguments.
// If an instruction has fewer than 5 arguments,
// the final elements in the array are nil.
// (Doc fix: the array holds up to 5 args, not 4.)
type Args [5]Arg
// A Reg is a single register. Note that the zero value is invalid, not
// R0: the register constants start at 1 (see the leading blank iota
// below), and Reg(0) prints as "Reg(0)".
// It also includes floating point, vector and VSX registers.
type Reg uint16

const (
	_ Reg = iota
	R0
	R1
	R2
	R3
	R4
	R5
	R6
	R7
	R8
	R9
	R10
	R11
	R12
	R13
	R14
	R15
	R16
	R17
	R18
	R19
	R20
	R21
	R22
	R23
	R24
	R25
	R26
	R27
	R28
	R29
	R30
	R31
	F0
	F1
	F2
	F3
	F4
	F5
	F6
	F7
	F8
	F9
	F10
	F11
	F12
	F13
	F14
	F15
	F16
	F17
	F18
	F19
	F20
	F21
	F22
	F23
	F24
	F25
	F26
	F27
	F28
	F29
	F30
	F31
	V0 // VSX extension, F0 is V0[0:63].
	V1
	V2
	V3
	V4
	V5
	V6
	V7
	V8
	V9
	V10
	V11
	V12
	V13
	V14
	V15
	V16
	V17
	V18
	V19
	V20
	V21
	V22
	V23
	V24
	V25
	V26
	V27
	V28
	V29
	V30
	V31
	VS0
	VS1
	VS2
	VS3
	VS4
	VS5
	VS6
	VS7
	VS8
	VS9
	VS10
	VS11
	VS12
	VS13
	VS14
	VS15
	VS16
	VS17
	VS18
	VS19
	VS20
	VS21
	VS22
	VS23
	VS24
	VS25
	VS26
	VS27
	VS28
	VS29
	VS30
	VS31
	VS32
	VS33
	VS34
	VS35
	VS36
	VS37
	VS38
	VS39
	VS40
	VS41
	VS42
	VS43
	VS44
	VS45
	VS46
	VS47
	VS48
	VS49
	VS50
	VS51
	VS52
	VS53
	VS54
	VS55
	VS56
	VS57
	VS58
	VS59
	VS60
	VS61
	VS62
	VS63
)

// IsArg marks Reg as an instruction Arg.
func (Reg) IsArg() {}

// regClasses maps each contiguous register range to its assembler name
// prefix.
var regClasses = []struct {
	lo, hi Reg
	prefix string
}{
	{R0, R31, "r"},
	{F0, F31, "f"},
	{V0, V31, "v"},
	{VS0, VS63, "vs"},
}

// String returns the assembler name of the register ("r3", "f1", "v2",
// "vs40"), or "Reg(n)" for values outside every defined range.
func (r Reg) String() string {
	for _, c := range regClasses {
		if c.lo <= r && r <= c.hi {
			return fmt.Sprintf("%s%d", c.prefix, int(r-c.lo))
		}
	}
	return fmt.Sprintf("Reg(%d)", int(r))
}
// CondReg is a bit or field in the condition register.
// Values Cond0LT..Cond7SO name individual condition bits (four per CR
// field, in the order LT, GT, EQ, SO); values CR0..CR7 name whole
// 4-bit fields. The zero value is reserved.
type CondReg int8

const (
	_ CondReg = iota // 0 means "no condition register"
	// Condition Register bits: four (LT, GT, EQ, SO) for each of the
	// eight CR fields.
	Cond0LT
	Cond0GT
	Cond0EQ
	Cond0SO
	Cond1LT
	Cond1GT
	Cond1EQ
	Cond1SO
	Cond2LT
	Cond2GT
	Cond2EQ
	Cond2SO
	Cond3LT
	Cond3GT
	Cond3EQ
	Cond3SO
	Cond4LT
	Cond4GT
	Cond4EQ
	Cond4SO
	Cond5LT
	Cond5GT
	Cond5EQ
	Cond5SO
	Cond6LT
	Cond6GT
	Cond6EQ
	Cond6SO
	Cond7LT
	Cond7GT
	Cond7EQ
	Cond7SO
	// Condition Register Fields
	CR0
	CR1
	CR2
	CR3
	CR4
	CR5
	CR6
	CR7
)
// IsArg marks CondReg as an instruction argument.
func (CondReg) IsArg() {}

// String renders c as "CRn" for a condition-register field,
// "Cond<field><bit>" (e.g. "Cond2EQ") for a single condition bit, or
// "CondReg(n)" for any value outside both ranges.
func (c CondReg) String() string {
	if c >= CR0 {
		return fmt.Sprintf("CR%d", int(c-CR0))
	}
	if c >= Cond0LT {
		bits := [4]string{"LT", "GT", "EQ", "SO"}
		return fmt.Sprintf("Cond%d%s", int((c-Cond0LT)/4), bits[(c-Cond0LT)%4])
	}
	return fmt.Sprintf("CondReg(%d)", int(c))
}
// SpReg is a special register, its meaning depends on Op.
type SpReg uint16

// SpRegZero is the zero value of SpReg.
const SpRegZero SpReg = 0

// IsArg marks SpReg as an instruction argument.
func (SpReg) IsArg() {}

// String renders s as "SpReg(n)" with the SPR number in decimal.
func (s SpReg) String() string {
	return fmt.Sprintf("SpReg(%d)", uint16(s))
}
// PCRel is a PC-relative offset, used only in branch instructions.
type PCRel int32

// IsArg marks PCRel as an instruction argument.
func (PCRel) IsArg() {}

// String renders r as a signed hexadecimal displacement from the
// program counter, e.g. "PC+0x10" or "PC-0x4".
func (r PCRel) String() string {
	return "PC" + fmt.Sprintf("%+#x", int32(r))
}
// A Label is a code (text) address, used only in absolute branch instructions.
type Label uint32

// IsArg marks Label as an instruction argument.
func (Label) IsArg() {}

// String renders l as a hexadecimal address, e.g. "0x100".
func (l Label) String() string {
	return fmt.Sprintf("0x%x", uint32(l))
}
// Imm represents an immediate number.
type Imm int32

// IsArg marks Imm as an instruction argument.
func (Imm) IsArg() {}

// String renders i as a plain signed decimal number.
func (i Imm) String() string {
	return fmt.Sprint(int32(i))
}
// Offset represents a memory offset immediate.
type Offset int32

// IsArg marks Offset as an instruction argument.
func (Offset) IsArg() {}

// String renders o as a signed decimal with an explicit sign,
// e.g. "+8" or "-4".
func (o Offset) String() string {
	v := int32(o)
	return fmt.Sprintf("%+d", v)
}

245
vendor/golang.org/x/arch/ppc64/ppc64asm/plan9.go generated vendored Normal file

@ -0,0 +1,245 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64asm
import (
"fmt"
"strings"
)
// GoSyntax returns the Go assembler syntax for the instruction.
// The pc is the program counter of the first instruction, used for expanding
// PC-relative addresses into absolute ones.
// The symname function queries the symbol table for the program
// being disassembled. It returns the name and base address of the symbol
// containing the target, if any; otherwise it returns "", 0.
func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64)) string {
	if symname == nil {
		symname = func(uint64) (string, uint64) { return "", 0 }
	}
	// An all-zero word is emitted as data; any other undecodable
	// encoding renders as "?".
	if inst.Op == 0 && inst.Enc == 0 {
		return "WORD $0"
	} else if inst.Op == 0 {
		return "?"
	}
	// Format each argument in Plan 9 syntax; a nil entry marks the end
	// of the argument list.
	var args []string
	for i, a := range inst.Args[:] {
		if a == nil {
			break
		}
		if s := plan9Arg(&inst, i, pc, a, symname); s != "" {
			// In the case for some BC instructions, a CondReg arg has
			// both the CR and the branch condition encoded in its value.
			// plan9Arg will return a string with the string representation
			// of these values separated by a blank that will be treated
			// as 2 args from this point on.
			if strings.IndexByte(s, ' ') > 0 {
				t := strings.Split(s, " ")
				args = append(args, t[0])
				args = append(args, t[1])
			} else {
				args = append(args, s)
			}
		}
	}
	// Pick the Plan 9 mnemonic. Ops without an entry in plan9OpMap use
	// the uppercased GNU mnemonic, with a trailing "." (record form)
	// rewritten as the "CC" suffix.
	var op string
	op = plan9OpMap[inst.Op]
	if op == "" {
		op = strings.ToUpper(inst.Op.String())
		if op[len(op)-1] == '.' {
			op = op[:len(op)-1] + "CC"
		}
	}
	// Lay out the instruction: each class of instruction needs its
	// operands reordered or specially formatted for Plan 9 syntax.
	switch inst.Op {
	default: // dst, sA, sB, ...
		if len(args) == 0 {
			return op
		} else if len(args) == 1 {
			return fmt.Sprintf("%s %s", op, args[0])
		}
		// Move the destination (first in Power ISA order) to the end,
		// where Plan 9 syntax expects it.
		args = append(args, args[0])
		return op + " " + strings.Join(args[1:], ",")
	case SYNC:
		// SYNC 1 is the lightweight sync; anything else prints as the
		// heavyweight form.
		if args[0] == "$1" {
			return "LWSYNC"
		}
		return "HWSYNC"
	case ISEL:
		return "ISEL " + args[3] + "," + args[1] + "," + args[2] + "," + args[0]
	// store instructions always have the memory operand at the end, no need to reorder
	// indexed stores handled separately
	case STB, STBU,
		STH, STHU,
		STW, STWU,
		STD, STDU,
		STQ:
		return op + " " + strings.Join(args, ",")
	case CMPD, CMPDI, CMPLD, CMPLDI, CMPW, CMPWI, CMPLW, CMPLWI:
		// Two args when CR0 was suppressed by plan9Arg, three when an
		// explicit CR field is present.
		if len(args) == 2 {
			return op + " " + args[0] + "," + args[1]
		} else if len(args) == 3 {
			return op + " " + args[0] + "," + args[1] + "," + args[2]
		}
		return op + " " + args[0] + " ??"
	case LIS:
		return "ADDIS $0," + args[1] + "," + args[0]
	// store instructions with index registers
	case STBX, STBUX, STHX, STHUX, STWX, STWUX, STDX, STDUX,
		STHBRX, STWBRX, STDBRX, STSWX, STFSX, STFSUX, STFDX, STFDUX, STFIWX, STFDPX:
		// Strip the "ST" prefix and trailing "X" to build the MOV-style
		// mnemonic, e.g. STWX -> MOVW.
		return "MOV" + op[2:len(op)-1] + " " + args[0] + ",(" + args[2] + ")(" + args[1] + ")"
	case STDCXCC, STWCXCC, STHCXCC, STBCXCC:
		return op + " " + args[0] + ",(" + args[2] + ")(" + args[1] + ")"
	case STXVD2X, STXVW4X:
		return op + " " + args[0] + ",(" + args[2] + ")(" + args[1] + ")"
	// load instructions with index registers
	case LBZX, LBZUX, LHZX, LHZUX, LWZX, LWZUX, LDX, LDUX,
		LHBRX, LWBRX, LDBRX, LSWX, LFSX, LFSUX, LFDX, LFDUX, LFIWAX, LFIWZX:
		// Strip the "L" prefix and trailing "X", e.g. LWZX -> MOVWZ.
		return "MOV" + op[1:len(op)-1] + " (" + args[2] + ")(" + args[1] + ")," + args[0]
	case LDARX, LWARX, LHARX, LBARX:
		return op + " (" + args[2] + ")(" + args[1] + ")," + args[0]
	case LXVD2X, LXVW4X:
		return op + " (" + args[2] + ")(" + args[1] + ")," + args[0]
	case DCBT, DCBTST, DCBZ, DCBST:
		return op + " (" + args[1] + ")"
	// branch instructions need additional handling
	case BCLR:
		if int(inst.Args[0].(Imm))&20 == 20 { // unconditional
			return "RET"
		}
		return op + " " + strings.Join(args, ", ")
	case BC:
		if int(inst.Args[0].(Imm))&0x1c == 12 { // jump on cond bit set
			if len(args) == 4 {
				return fmt.Sprintf("B%s %s,%s", args[1], args[2], args[3])
			}
			return fmt.Sprintf("B%s %s", args[1], args[2])
		} else if int(inst.Args[0].(Imm))&0x1c == 4 && revCondMap[args[1]] != "" { // jump on cond bit not set
			if len(args) == 4 {
				return fmt.Sprintf("B%s %s,%s", revCondMap[args[1]], args[2], args[3])
			}
			return fmt.Sprintf("B%s %s", revCondMap[args[1]], args[2])
		}
		return op + " " + strings.Join(args, ",")
	case BCCTR:
		if int(inst.Args[0].(Imm))&20 == 20 { // unconditional
			return "BR (CTR)"
		}
		return op + " " + strings.Join(args, ", ")
	case BCCTRL:
		if int(inst.Args[0].(Imm))&20 == 20 { // unconditional
			return "BL (CTR)"
		}
		return op + " " + strings.Join(args, ",")
	case BCA, BCL, BCLA, BCLRL, BCTAR, BCTARL:
		return op + " " + strings.Join(args, ",")
	}
}
// plan9Arg formats arg (which is the argIndex's arg in inst) according to Plan 9 rules.
//
// NOTE: because GoSyntax is the only caller of this func, and it receives a copy
// of inst, it's ok to modify inst.Args here.
func plan9Arg(inst *Inst, argIndex int, pc uint64, arg Arg, symname func(uint64) (string, uint64)) string {
	// special cases for load/store instructions
	if _, ok := arg.(Offset); ok {
		// An Offset must be followed by its base register; a lone
		// Offset means the decoder table is wrong.
		if argIndex+1 == len(inst.Args) || inst.Args[argIndex+1] == nil {
			panic(fmt.Errorf("wrong table: offset not followed by register"))
		}
	}
	switch arg := arg.(type) {
	case Reg:
		// R0 as the base register of a load/store reads as the
		// constant 0, so print it as "0" rather than "R0".
		if isLoadStoreOp(inst.Op) && argIndex == 1 && arg == R0 {
			return "0"
		}
		// R30 is printed as "g" (the goroutine pointer register in Go
		// assembly on ppc64).
		if arg == R30 {
			return "g"
		}
		return strings.ToUpper(arg.String())
	case CondReg:
		// This op is left as its numerical value, not mapped onto CR + condition
		if inst.Op == ISEL {
			return fmt.Sprintf("$%d", (arg - Cond0LT))
		}
		if arg == CR0 && strings.HasPrefix(inst.Op.String(), "cmp") {
			return "" // don't show cr0 for cmp instructions
		} else if arg >= CR0 {
			return fmt.Sprintf("CR%d", int(arg-CR0))
		}
		// A single condition bit: the bit name alone for CR0, or the
		// bit name and field separated by a blank (GoSyntax splits the
		// blank-separated result into two args).
		bit := [4]string{"LT", "GT", "EQ", "SO"}[(arg-Cond0LT)%4]
		if arg <= Cond0SO {
			return bit
		}
		return fmt.Sprintf("%s CR%d", bit, int(arg-Cond0LT)/4)
	case Imm:
		return fmt.Sprintf("$%d", arg)
	case SpReg:
		// SPR numbers 8 and 9 are the link and count registers.
		switch arg {
		case 8:
			return "LR"
		case 9:
			return "CTR"
		}
		return fmt.Sprintf("SPR(%d)", int(arg))
	case PCRel:
		// Expand the PC-relative displacement to an absolute target,
		// symbolizing it when the target is exactly a known symbol.
		addr := pc + uint64(int64(arg))
		if s, base := symname(addr); s != "" && base == addr {
			return fmt.Sprintf("%s(SB)", s)
		}
		return fmt.Sprintf("%#x", addr)
	case Label:
		return fmt.Sprintf("%#x", int(arg))
	case Offset:
		// Fold the offset and its base register into one "off(Rn)"
		// operand, consuming the register arg (see NOTE above about
		// mutating inst.Args).
		reg := inst.Args[argIndex+1].(Reg)
		removeArg(inst, argIndex+1)
		if reg == R0 {
			return fmt.Sprintf("%d(0)", int(arg))
		}
		return fmt.Sprintf("%d(R%d)", int(arg), reg-R0)
	}
	return fmt.Sprintf("???(%v)", arg)
}
// revCondMap maps a conditional register bit to its inverse, if possible.
// SO (summary overflow) has no entry: it has no inverse condition here,
// so BC on a clear SO bit falls through to the generic layout.
var revCondMap = map[string]string{
	"LT": "GE", "GT": "LE", "EQ": "NE",
}
// plan9OpMap maps an Op to its Plan 9 mnemonics, if different than its GNU mnemonics.
// Ops absent from this map take the uppercased GNU mnemonic instead
// (see GoSyntax).
var plan9OpMap = map[Op]string{
	LWARX: "LWAR",
	LDARX: "LDAR",
	LHARX: "LHAR",
	LBARX: "LBAR",
	ADDI:  "ADD",
	SRADI: "SRAD",
	SUBF:  "SUB",
	LI:    "MOVD",
	LBZ: "MOVBZ", STB: "MOVB",
	LBZU: "MOVBZU", STBU: "MOVBU",
	LHZ: "MOVHZ", LHA: "MOVH", STH: "MOVH",
	LHZU: "MOVHZU", STHU: "MOVHU",
	LWZ: "MOVWZ", LWA: "MOVW", STW: "MOVW",
	LWZU: "MOVWZU", STWU: "MOVWU",
	LD: "MOVD", STD: "MOVD",
	LDU: "MOVDU", STDU: "MOVDU",
	CMPD: "CMP", CMPDI: "CMP",
	CMPW: "CMPW", CMPWI: "CMPW",
	CMPLD: "CMPU", CMPLDI: "CMPU",
	CMPLW: "CMPWU", CMPLWI: "CMPWU",
	MTSPR: "MOVD", MFSPR: "MOVD", // the width is ambiguous for SPRs
	B:  "BR",
	BL: "CALL",
}

5494
vendor/golang.org/x/arch/ppc64/ppc64asm/tables.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1
vendor/modules.txt vendored

@ -70,6 +70,7 @@ go.starlark.net/syntax
# golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4
## explicit
golang.org/x/arch/arm64/arm64asm
golang.org/x/arch/ppc64/ppc64asm
golang.org/x/arch/x86/x86asm
# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
golang.org/x/mod/semver