diff --git a/go.mod b/go.mod index 94bbf51f..46a146e2 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/spf13/pflag v0.0.0-20170417173400-9e4c21054fa1 // indirect github.com/stretchr/testify v1.3.0 // indirect go.starlark.net v0.0.0-20190702223751-32f345186213 - golang.org/x/arch v0.0.0-20171004143515-077ac972c2e4 + golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4 golang.org/x/crypto v0.0.0-20180614174826-fd5f17ee7299 // indirect golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb golang.org/x/tools v0.0.0-20181120060634-fc4f04983f62 diff --git a/go.sum b/go.sum old mode 100644 new mode 100755 index a23241dc..68e09854 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= go.starlark.net v0.0.0-20190702223751-32f345186213 h1:lkYv5AKwvvduv5XWP6szk/bvvgO6aDeUujhZQXIFTes= go.starlark.net v0.0.0-20190702223751-32f345186213/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= -golang.org/x/arch v0.0.0-20171004143515-077ac972c2e4 h1:TP7YcWHbnFq4v8/3wM2JwgM0SRRtsYJ7Z6Oj0arz2bs= -golang.org/x/arch v0.0.0-20171004143515-077ac972c2e4/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= +golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4 h1:QlVATYS7JBoZMVaf+cNjb90WD/beKVHnIxFKT4QaHVI= +golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/crypto v0.0.0-20180614174826-fd5f17ee7299 h1:zxP+xTjjk4kD+M5IFPweL7/4851FUhYkzbDqbzkN1JE= golang.org/x/crypto v0.0.0-20180614174826-fd5f17ee7299/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= diff --git a/pkg/proc/amd64_arch.go b/pkg/proc/amd64_arch.go new file mode 100644 index 00000000..223c120f --- /dev/null +++ b/pkg/proc/amd64_arch.go @@ -0,0 +1,299 @@ +package proc + +import 
( + "encoding/binary" + + "github.com/go-delve/delve/pkg/dwarf/frame" + "github.com/go-delve/delve/pkg/dwarf/op" + "golang.org/x/arch/x86/x86asm" +) + +// AMD64 represents the AMD64 CPU architecture. +type AMD64 struct { + gStructOffset uint64 + goos string + + // crosscall2fn is the DIE of crosscall2, a function used by the go runtime + // to call C functions. This function in go 1.9 (and previous versions) had + // a bad frame descriptor which needs to be fixed to generate good stack + // traces. + crosscall2fn *Function + + // sigreturnfn is the DIE of runtime.sigreturn, the return trampoline for + // the signal handler. See comment in FixFrameUnwindContext for a + // description of why this is needed. + sigreturnfn *Function +} + +const ( + amd64DwarfIPRegNum uint64 = 16 + amd64DwarfSPRegNum uint64 = 7 + amd64DwarfBPRegNum uint64 = 6 +) + +var amd64BreakInstruction = []byte{0xCC} + +// AMD64Arch returns an initialized AMD64 +// struct. +func AMD64Arch(goos string) *AMD64 { + return &AMD64{ + goos: goos, + } +} + +// PtrSize returns the size of a pointer +// on this architecture. +func (a *AMD64) PtrSize() int { + return 8 +} + +// MinInstructionLength returns the min lenth +// of the instruction +func (a *AMD64) MinInstructionLength() int { + return 1 +} + +// BreakpointInstruction returns the Breakpoint +// instruction for this architecture. +func (a *AMD64) BreakpointInstruction() []byte { + return amd64BreakInstruction +} + +// BreakpointSize returns the size of the +// breakpoint instruction on this architecture. +func (a *AMD64) BreakpointSize() int { + return len(amd64BreakInstruction) +} + +// DerefTLS returns true if the value of regs.TLS()+GStructOffset() is a +// pointer to the G struct +func (a *AMD64) DerefTLS() bool { + return a.goos == "windows" +} + +// FixFrameUnwindContext adds default architecture rules to fctxt or returns +// the default frame unwind context if fctxt is nil. 
+func (a *AMD64) FixFrameUnwindContext(fctxt *frame.FrameContext, pc uint64, bi *BinaryInfo) *frame.FrameContext { + if a.sigreturnfn == nil { + a.sigreturnfn = bi.LookupFunc["runtime.sigreturn"] + } + + if fctxt == nil || (a.sigreturnfn != nil && pc >= a.sigreturnfn.Entry && pc < a.sigreturnfn.End) { + // When there's no frame descriptor entry use BP (the frame pointer) instead + // - return register is [bp + a.PtrSize()] (i.e. [cfa-a.PtrSize()]) + // - cfa is bp + a.PtrSize()*2 + // - bp is [bp] (i.e. [cfa-a.PtrSize()*2]) + // - sp is cfa + + // When the signal handler runs it will move the execution to the signal + // handling stack (installed using the sigaltstack system call). + // This isn't a proper stack switch: the pointer to g in TLS will still + // refer to whatever g was executing on that thread before the signal was + // received. + // Since go did not execute a stack switch the previous value of sp, pc + // and bp is not saved inside g.sched, as it normally would. + // The only way to recover is to either read sp/pc from the signal context + // parameter (the ucontext_t* parameter) or to unconditionally follow the + // frame pointer when we get to runtime.sigreturn (which is what we do + // here). 
+ + return &frame.FrameContext{ + RetAddrReg: amd64DwarfIPRegNum, + Regs: map[uint64]frame.DWRule{ + amd64DwarfIPRegNum: frame.DWRule{ + Rule: frame.RuleOffset, + Offset: int64(-a.PtrSize()), + }, + amd64DwarfBPRegNum: frame.DWRule{ + Rule: frame.RuleOffset, + Offset: int64(-2 * a.PtrSize()), + }, + amd64DwarfSPRegNum: frame.DWRule{ + Rule: frame.RuleValOffset, + Offset: 0, + }, + }, + CFA: frame.DWRule{ + Rule: frame.RuleCFA, + Reg: amd64DwarfBPRegNum, + Offset: int64(2 * a.PtrSize()), + }, + } + } + + if a.crosscall2fn == nil { + a.crosscall2fn = bi.LookupFunc["crosscall2"] + } + + if a.crosscall2fn != nil && pc >= a.crosscall2fn.Entry && pc < a.crosscall2fn.End { + rule := fctxt.CFA + if rule.Offset == crosscall2SPOffsetBad { + switch a.goos { + case "windows": + rule.Offset += crosscall2SPOffsetWindows + default: + rule.Offset += crosscall2SPOffsetNonWindows + } + } + fctxt.CFA = rule + } + + // We assume that RBP is the frame pointer and we want to keep it updated, + // so that we can use it to unwind the stack even when we encounter frames + // without descriptor entries. + // If there isn't a rule already we emit one. + if fctxt.Regs[amd64DwarfBPRegNum].Rule == frame.RuleUndefined { + fctxt.Regs[amd64DwarfBPRegNum] = frame.DWRule{ + Rule: frame.RuleFramePointer, + Reg: amd64DwarfBPRegNum, + Offset: 0, + } + } + + return fctxt +} + +// RegSize returns the size (in bytes) of register regnum. 
+// The mapping between hardware registers and DWARF registers is specified +// in the System V ABI AMD64 Architecture Processor Supplement page 57, +// figure 3.36 +// https://www.uclibc.org/docs/psABI-x86_64.pdf +func (a *AMD64) RegSize(regnum uint64) int { + // XMM registers + if regnum > amd64DwarfIPRegNum && regnum <= 32 { + return 16 + } + // x87 registers + if regnum >= 33 && regnum <= 40 { + return 10 + } + return 8 +} + +// The mapping between hardware registers and DWARF registers is specified +// in the System V ABI AMD64 Architecture Processor Supplement page 57, +// figure 3.36 +// https://www.uclibc.org/docs/psABI-x86_64.pdf + +var amd64DwarfToHardware = map[int]x86asm.Reg{ + 0: x86asm.RAX, + 1: x86asm.RDX, + 2: x86asm.RCX, + 3: x86asm.RBX, + 4: x86asm.RSI, + 5: x86asm.RDI, + 8: x86asm.R8, + 9: x86asm.R9, + 10: x86asm.R10, + 11: x86asm.R11, + 12: x86asm.R12, + 13: x86asm.R13, + 14: x86asm.R14, + 15: x86asm.R15, +} + +var amd64DwarfToName = map[int]string{ + 17: "XMM0", + 18: "XMM1", + 19: "XMM2", + 20: "XMM3", + 21: "XMM4", + 22: "XMM5", + 23: "XMM6", + 24: "XMM7", + 25: "XMM8", + 26: "XMM9", + 27: "XMM10", + 28: "XMM11", + 29: "XMM12", + 30: "XMM13", + 31: "XMM14", + 32: "XMM15", + 33: "ST(0)", + 34: "ST(1)", + 35: "ST(2)", + 36: "ST(3)", + 37: "ST(4)", + 38: "ST(5)", + 39: "ST(6)", + 40: "ST(7)", + 49: "Eflags", + 50: "Es", + 51: "Cs", + 52: "Ss", + 53: "Ds", + 54: "Fs", + 55: "Gs", + 58: "Fs_base", + 59: "Gs_base", + 64: "MXCSR", + 65: "CW", + 66: "SW", +} + +func maxAmd64DwarfRegister() int { + max := int(amd64DwarfIPRegNum) + for i := range amd64DwarfToHardware { + if i > max { + max = i + } + } + for i := range amd64DwarfToName { + if i > max { + max = i + } + } + return max +} + +// RegistersToDwarfRegisters converts hardware registers to the format used +// by the DWARF expression interpreter. 
+func (a *AMD64) RegistersToDwarfRegisters(staticBase uint64, regs Registers) op.DwarfRegisters { + dregs := make([]*op.DwarfRegister, maxAmd64DwarfRegister()+1) + + dregs[amd64DwarfIPRegNum] = op.DwarfRegisterFromUint64(regs.PC()) + dregs[amd64DwarfSPRegNum] = op.DwarfRegisterFromUint64(regs.SP()) + dregs[amd64DwarfBPRegNum] = op.DwarfRegisterFromUint64(regs.BP()) + + for dwarfReg, asmReg := range amd64DwarfToHardware { + v, err := regs.Get(int(asmReg)) + if err == nil { + dregs[dwarfReg] = op.DwarfRegisterFromUint64(v) + } + } + + for _, reg := range regs.Slice(true) { + for dwarfReg, regName := range amd64DwarfToName { + if regName == reg.Name { + dregs[dwarfReg] = op.DwarfRegisterFromBytes(reg.Bytes) + } + } + } + + return op.DwarfRegisters{ + StaticBase: staticBase, + Regs: dregs, + ByteOrder: binary.LittleEndian, + PCRegNum: amd64DwarfIPRegNum, + SPRegNum: amd64DwarfSPRegNum, + BPRegNum: amd64DwarfBPRegNum, + } +} + +// AddrAndStackRegsToDwarfRegisters returns DWARF registers from the passed in +// PC, SP, and BP registers in the format used by the DWARF expression interpreter. 
+func (a *AMD64) AddrAndStackRegsToDwarfRegisters(staticBase, pc, sp, bp uint64) op.DwarfRegisters { + dregs := make([]*op.DwarfRegister, amd64DwarfIPRegNum+1) + dregs[amd64DwarfIPRegNum] = op.DwarfRegisterFromUint64(pc) + dregs[amd64DwarfSPRegNum] = op.DwarfRegisterFromUint64(sp) + dregs[amd64DwarfBPRegNum] = op.DwarfRegisterFromUint64(bp) + + return op.DwarfRegisters{ + StaticBase: staticBase, + Regs: dregs, + ByteOrder: binary.LittleEndian, + PCRegNum: amd64DwarfIPRegNum, + SPRegNum: amd64DwarfSPRegNum, + BPRegNum: amd64DwarfBPRegNum, + } +} diff --git a/pkg/proc/arch.go b/pkg/proc/arch.go index a31f9494..7c14a641 100644 --- a/pkg/proc/arch.go +++ b/pkg/proc/arch.go @@ -1,17 +1,15 @@ package proc import ( - "encoding/binary" - "github.com/go-delve/delve/pkg/dwarf/frame" "github.com/go-delve/delve/pkg/dwarf/op" - "golang.org/x/arch/x86/x86asm" ) // Arch defines an interface for representing a // CPU architecture. type Arch interface { PtrSize() int + MinInstructionLength() int BreakpointInstruction() []byte BreakpointSize() int DerefTLS() bool @@ -21,292 +19,8 @@ type Arch interface { AddrAndStackRegsToDwarfRegisters(uint64, uint64, uint64, uint64) op.DwarfRegisters } -// AMD64 represents the AMD64 CPU architecture. -type AMD64 struct { - gStructOffset uint64 - goos string - - // crosscall2fn is the DIE of crosscall2, a function used by the go runtime - // to call C functions. This function in go 1.9 (and previous versions) had - // a bad frame descriptor which needs to be fixed to generate good stack - // traces. - crosscall2fn *Function - - // sigreturnfn is the DIE of runtime.sigreturn, the return trampoline for - // the signal handler. See comment in FixFrameUnwindContext for a - // description of why this is needed. - sigreturnfn *Function -} - -const ( - amd64DwarfIPRegNum uint64 = 16 - amd64DwarfSPRegNum uint64 = 7 - amd64DwarfBPRegNum uint64 = 6 -) - -var amd64BreakInstruction = []byte{0xCC} - -// AMD64Arch returns an initialized AMD64 -// struct. 
-func AMD64Arch(goos string) *AMD64 { - return &AMD64{ - goos: goos, - } -} - -// PtrSize returns the size of a pointer -// on this architecture. -func (a *AMD64) PtrSize() int { - return 8 -} - -// BreakpointInstruction returns the Breakpoint -// instruction for this architecture. -func (a *AMD64) BreakpointInstruction() []byte { - return amd64BreakInstruction -} - -// BreakpointSize returns the size of the -// breakpoint instruction on this architecture. -func (a *AMD64) BreakpointSize() int { - return len(amd64BreakInstruction) -} - -// DerefTLS returns true if the value of regs.TLS()+GStructOffset() is a -// pointer to the G struct -func (a *AMD64) DerefTLS() bool { - return a.goos == "windows" -} - const ( crosscall2SPOffsetBad = 0x8 crosscall2SPOffsetWindows = 0x118 crosscall2SPOffsetNonWindows = 0x58 ) - -// FixFrameUnwindContext adds default architecture rules to fctxt or returns -// the default frame unwind context if fctxt is nil. -func (a *AMD64) FixFrameUnwindContext(fctxt *frame.FrameContext, pc uint64, bi *BinaryInfo) *frame.FrameContext { - if a.sigreturnfn == nil { - a.sigreturnfn = bi.LookupFunc["runtime.sigreturn"] - } - - if fctxt == nil || (a.sigreturnfn != nil && pc >= a.sigreturnfn.Entry && pc < a.sigreturnfn.End) { - // When there's no frame descriptor entry use BP (the frame pointer) instead - // - return register is [bp + a.PtrSize()] (i.e. [cfa-a.PtrSize()]) - // - cfa is bp + a.PtrSize()*2 - // - bp is [bp] (i.e. [cfa-a.PtrSize()*2]) - // - sp is cfa - - // When the signal handler runs it will move the execution to the signal - // handling stack (installed using the sigaltstack system call). - // This isn't a proper stack switch: the pointer to g in TLS will still - // refer to whatever g was executing on that thread before the signal was - // received. - // Since go did not execute a stack switch the previous value of sp, pc - // and bp is not saved inside g.sched, as it normally would. 
- // The only way to recover is to either read sp/pc from the signal context - // parameter (the ucontext_t* parameter) or to unconditionally follow the - // frame pointer when we get to runtime.sigreturn (which is what we do - // here). - - return &frame.FrameContext{ - RetAddrReg: amd64DwarfIPRegNum, - Regs: map[uint64]frame.DWRule{ - amd64DwarfIPRegNum: frame.DWRule{ - Rule: frame.RuleOffset, - Offset: int64(-a.PtrSize()), - }, - amd64DwarfBPRegNum: frame.DWRule{ - Rule: frame.RuleOffset, - Offset: int64(-2 * a.PtrSize()), - }, - amd64DwarfSPRegNum: frame.DWRule{ - Rule: frame.RuleValOffset, - Offset: 0, - }, - }, - CFA: frame.DWRule{ - Rule: frame.RuleCFA, - Reg: amd64DwarfBPRegNum, - Offset: int64(2 * a.PtrSize()), - }, - } - } - - if a.crosscall2fn == nil { - a.crosscall2fn = bi.LookupFunc["crosscall2"] - } - - if a.crosscall2fn != nil && pc >= a.crosscall2fn.Entry && pc < a.crosscall2fn.End { - rule := fctxt.CFA - if rule.Offset == crosscall2SPOffsetBad { - switch a.goos { - case "windows": - rule.Offset += crosscall2SPOffsetWindows - default: - rule.Offset += crosscall2SPOffsetNonWindows - } - } - fctxt.CFA = rule - } - - // We assume that RBP is the frame pointer and we want to keep it updated, - // so that we can use it to unwind the stack even when we encounter frames - // without descriptor entries. - // If there isn't a rule already we emit one. - if fctxt.Regs[amd64DwarfBPRegNum].Rule == frame.RuleUndefined { - fctxt.Regs[amd64DwarfBPRegNum] = frame.DWRule{ - Rule: frame.RuleFramePointer, - Reg: amd64DwarfBPRegNum, - Offset: 0, - } - } - - return fctxt -} - -// RegSize returns the size (in bytes) of register regnum. 
-// The mapping between hardware registers and DWARF registers is specified -// in the System V ABI AMD64 Architecture Processor Supplement page 57, -// figure 3.36 -// https://www.uclibc.org/docs/psABI-x86_64.pdf -func (a *AMD64) RegSize(regnum uint64) int { - // XMM registers - if regnum > amd64DwarfIPRegNum && regnum <= 32 { - return 16 - } - // x87 registers - if regnum >= 33 && regnum <= 40 { - return 10 - } - return 8 -} - -// The mapping between hardware registers and DWARF registers is specified -// in the System V ABI AMD64 Architecture Processor Supplement page 57, -// figure 3.36 -// https://www.uclibc.org/docs/psABI-x86_64.pdf - -var asm64DwarfToHardware = map[int]x86asm.Reg{ - 0: x86asm.RAX, - 1: x86asm.RDX, - 2: x86asm.RCX, - 3: x86asm.RBX, - 4: x86asm.RSI, - 5: x86asm.RDI, - 8: x86asm.R8, - 9: x86asm.R9, - 10: x86asm.R10, - 11: x86asm.R11, - 12: x86asm.R12, - 13: x86asm.R13, - 14: x86asm.R14, - 15: x86asm.R15, -} - -var amd64DwarfToName = map[int]string{ - 17: "XMM0", - 18: "XMM1", - 19: "XMM2", - 20: "XMM3", - 21: "XMM4", - 22: "XMM5", - 23: "XMM6", - 24: "XMM7", - 25: "XMM8", - 26: "XMM9", - 27: "XMM10", - 28: "XMM11", - 29: "XMM12", - 30: "XMM13", - 31: "XMM14", - 32: "XMM15", - 33: "ST(0)", - 34: "ST(1)", - 35: "ST(2)", - 36: "ST(3)", - 37: "ST(4)", - 38: "ST(5)", - 39: "ST(6)", - 40: "ST(7)", - 49: "Eflags", - 50: "Es", - 51: "Cs", - 52: "Ss", - 53: "Ds", - 54: "Fs", - 55: "Gs", - 58: "Fs_base", - 59: "Gs_base", - 64: "MXCSR", - 65: "CW", - 66: "SW", -} - -func maxAmd64DwarfRegister() int { - max := int(amd64DwarfIPRegNum) - for i := range asm64DwarfToHardware { - if i > max { - max = i - } - } - for i := range amd64DwarfToName { - if i > max { - max = i - } - } - return max -} - -// RegistersToDwarfRegisters converts hardware registers to the format used -// by the DWARF expression interpreter. 
-func (a *AMD64) RegistersToDwarfRegisters(staticBase uint64, regs Registers) op.DwarfRegisters { - dregs := make([]*op.DwarfRegister, maxAmd64DwarfRegister()+1) - - dregs[amd64DwarfIPRegNum] = op.DwarfRegisterFromUint64(regs.PC()) - dregs[amd64DwarfSPRegNum] = op.DwarfRegisterFromUint64(regs.SP()) - dregs[amd64DwarfBPRegNum] = op.DwarfRegisterFromUint64(regs.BP()) - - for dwarfReg, asmReg := range asm64DwarfToHardware { - v, err := regs.Get(int(asmReg)) - if err == nil { - dregs[dwarfReg] = op.DwarfRegisterFromUint64(v) - } - } - - for _, reg := range regs.Slice(true) { - for dwarfReg, regName := range amd64DwarfToName { - if regName == reg.Name { - dregs[dwarfReg] = op.DwarfRegisterFromBytes(reg.Bytes) - } - } - } - - return op.DwarfRegisters{ - StaticBase: staticBase, - Regs: dregs, - ByteOrder: binary.LittleEndian, - PCRegNum: amd64DwarfIPRegNum, - SPRegNum: amd64DwarfSPRegNum, - BPRegNum: amd64DwarfBPRegNum, - } -} - -// AddrAndStackRegsToDwarfRegisters returns DWARF registers from the passed in -// PC, SP, and BP registers in the format used by the DWARF expression interpreter. 
-func (a *AMD64) AddrAndStackRegsToDwarfRegisters(staticBase, pc, sp, bp uint64) op.DwarfRegisters { - dregs := make([]*op.DwarfRegister, amd64DwarfIPRegNum+1) - dregs[amd64DwarfIPRegNum] = op.DwarfRegisterFromUint64(pc) - dregs[amd64DwarfSPRegNum] = op.DwarfRegisterFromUint64(sp) - dregs[amd64DwarfBPRegNum] = op.DwarfRegisterFromUint64(bp) - - return op.DwarfRegisters{ - StaticBase: staticBase, - Regs: dregs, - ByteOrder: binary.LittleEndian, - PCRegNum: amd64DwarfIPRegNum, - SPRegNum: amd64DwarfSPRegNum, - BPRegNum: amd64DwarfBPRegNum, - } -} diff --git a/pkg/proc/arm64_arch.go b/pkg/proc/arm64_arch.go new file mode 100644 index 00000000..b9474bd8 --- /dev/null +++ b/pkg/proc/arm64_arch.go @@ -0,0 +1,288 @@ +package proc + +import ( + "encoding/binary" + + "github.com/go-delve/delve/pkg/dwarf/frame" + "github.com/go-delve/delve/pkg/dwarf/op" + "golang.org/x/arch/arm64/arm64asm" +) + +// ARM64 represents the ARM64 CPU architecture. +type ARM64 struct { + gStructOffset uint64 + goos string + + // crosscall2fn is the DIE of crosscall2, a function used by the go runtime + // to call C functions. This function in go 1.9 (and previous versions) had + // a bad frame descriptor which needs to be fixed to generate good stack + // traces. + crosscall2fn *Function + + // sigreturnfn is the DIE of runtime.sigreturn, the return trampoline for + // the signal handler. See comment in FixFrameUnwindContext for a + // description of why this is needed. + sigreturnfn *Function +} + +const ( + arm64DwarfIPRegNum uint64 = 32 + arm64DwarfSPRegNum uint64 = 31 + arm64DwarfBPRegNum uint64 = 29 +) + +var arm64BreakInstruction = []byte{0x0, 0x0, 0x20, 0xd4} + +// ARM64Arch returns an initialized ARM64 +// struct. +func ARM64Arch(goos string) *ARM64 { + return &ARM64{ + goos: goos, + } +} + +// PtrSize returns the size of a pointer +// on this architecture. 
+func (a *ARM64) PtrSize() int { + return 8 +} + +// MinInstructionLength returns the minimum length +// of an instruction +func (a *ARM64) MinInstructionLength() int { + return 4 +} + +// BreakpointInstruction returns the Breakpoint +// instruction for this architecture. +func (a *ARM64) BreakpointInstruction() []byte { + return arm64BreakInstruction +} + +// BreakpointSize returns the size of the +// breakpoint instruction on this architecture. +func (a *ARM64) BreakpointSize() int { + return len(arm64BreakInstruction) +} + +// Always return false for now. +func (a *ARM64) DerefTLS() bool { + return false +} + +// FixFrameUnwindContext adds default architecture rules to fctxt or returns +// the default frame unwind context if fctxt is nil. +func (a *ARM64) FixFrameUnwindContext(fctxt *frame.FrameContext, pc uint64, bi *BinaryInfo) *frame.FrameContext { + if a.sigreturnfn == nil { + a.sigreturnfn = bi.LookupFunc["runtime.sigreturn"] + } + + if fctxt == nil || (a.sigreturnfn != nil && pc >= a.sigreturnfn.Entry && pc < a.sigreturnfn.End) { + // When there's no frame descriptor entry use BP (the frame pointer) instead + // - return register is [bp + a.PtrSize()] (i.e. [cfa-a.PtrSize()]) + // - cfa is bp + a.PtrSize()*2 + // - bp is [bp] (i.e. [cfa-a.PtrSize()*2]) + // - sp is cfa + + // When the signal handler runs it will move the execution to the signal + // handling stack (installed using the sigaltstack system call). + // This isn't a proper stack switch: the pointer to g in TLS will still + // refer to whatever g was executing on that thread before the signal was + // received. + // Since go did not execute a stack switch the previous value of sp, pc + // and bp is not saved inside g.sched, as it normally would. + // The only way to recover is to either read sp/pc from the signal context + // parameter (the ucontext_t* parameter) or to unconditionally follow the + // frame pointer when we get to runtime.sigreturn (which is what we do + // here). 
+ + return &frame.FrameContext{ + RetAddrReg: arm64DwarfIPRegNum, + Regs: map[uint64]frame.DWRule{ + arm64DwarfIPRegNum: frame.DWRule{ + Rule: frame.RuleOffset, + Offset: int64(-a.PtrSize()), + }, + arm64DwarfBPRegNum: frame.DWRule{ + Rule: frame.RuleOffset, + Offset: int64(-2 * a.PtrSize()), + }, + arm64DwarfSPRegNum: frame.DWRule{ + Rule: frame.RuleValOffset, + Offset: 0, + }, + }, + CFA: frame.DWRule{ + Rule: frame.RuleCFA, + Reg: arm64DwarfBPRegNum, + Offset: int64(2 * a.PtrSize()), + }, + } + } + + if a.crosscall2fn == nil { + a.crosscall2fn = bi.LookupFunc["crosscall2"] + } + + if a.crosscall2fn != nil && pc >= a.crosscall2fn.Entry && pc < a.crosscall2fn.End { + rule := fctxt.CFA + if rule.Offset == crosscall2SPOffsetBad { + switch a.goos { + case "windows": + rule.Offset += crosscall2SPOffsetWindows + default: + rule.Offset += crosscall2SPOffsetNonWindows + } + } + fctxt.CFA = rule + } + + // We assume that x29 (the ARM64 frame pointer) is kept updated, because we + // so that we can use it to unwind the stack even when we encounter frames + // without descriptor entries. + // If there isn't a rule already we emit one. 
+ if fctxt.Regs[arm64DwarfBPRegNum].Rule == frame.RuleUndefined { + fctxt.Regs[arm64DwarfBPRegNum] = frame.DWRule{ + Rule: frame.RuleFramePointer, + Reg: arm64DwarfBPRegNum, + Offset: 0, + } + } + + return fctxt +} + +func (a *ARM64) RegSize(regnum uint64) int { + // fp registers + if regnum >= 64 && regnum <= 95 { + return 16 + } + + return 8 // general registers +} + +// The mapping between hardware registers and DWARF registers is specified +// in the DWARF for the ARM Architecture page 7, +// Table 1 +// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf +var arm64DwarfToHardware = map[int]arm64asm.Reg{ + 0: arm64asm.X0, + 1: arm64asm.X1, + 2: arm64asm.X2, + 3: arm64asm.X3, + 4: arm64asm.X4, + 5: arm64asm.X5, + 6: arm64asm.X6, + 7: arm64asm.X7, + 8: arm64asm.X8, + 9: arm64asm.X9, + 10: arm64asm.X10, + 11: arm64asm.X11, + 12: arm64asm.X12, + 13: arm64asm.X13, + 14: arm64asm.X14, + 15: arm64asm.X15, + 16: arm64asm.X16, + 17: arm64asm.X17, + 18: arm64asm.X18, + 19: arm64asm.X19, + 20: arm64asm.X20, + 21: arm64asm.X21, + 22: arm64asm.X22, + 23: arm64asm.X23, + 24: arm64asm.X24, + 25: arm64asm.X25, + 26: arm64asm.X26, + 27: arm64asm.X27, + 28: arm64asm.X28, + 29: arm64asm.X29, + 30: arm64asm.X30, + 31: arm64asm.SP, + + 64: arm64asm.V0, + 65: arm64asm.V1, + 66: arm64asm.V2, + 67: arm64asm.V3, + 68: arm64asm.V4, + 69: arm64asm.V5, + 70: arm64asm.V6, + 71: arm64asm.V7, + 72: arm64asm.V8, + 73: arm64asm.V9, + 74: arm64asm.V10, + 75: arm64asm.V11, + 76: arm64asm.V12, + 77: arm64asm.V13, + 78: arm64asm.V14, + 79: arm64asm.V15, + 80: arm64asm.V16, + 81: arm64asm.V17, + 82: arm64asm.V18, + 83: arm64asm.V19, + 84: arm64asm.V20, + 85: arm64asm.V21, + 86: arm64asm.V22, + 87: arm64asm.V23, + 88: arm64asm.V24, + 89: arm64asm.V25, + 90: arm64asm.V26, + 91: arm64asm.V27, + 92: arm64asm.V28, + 93: arm64asm.V29, + 94: arm64asm.V30, + 95: arm64asm.V31, +} + +func maxArm64DwarfRegister() int { + max := int(arm64DwarfIPRegNum) + for i := range 
arm64DwarfToHardware { + if i > max { + max = i + } + } + return max +} + +// RegistersToDwarfRegisters converts hardware registers to the format used +// by the DWARF expression interpreter. +func (a *ARM64) RegistersToDwarfRegisters(staticBase uint64, regs Registers) op.DwarfRegisters { + dregs := make([]*op.DwarfRegister, maxArm64DwarfRegister()+1) + + dregs[arm64DwarfIPRegNum] = op.DwarfRegisterFromUint64(regs.PC()) + dregs[arm64DwarfSPRegNum] = op.DwarfRegisterFromUint64(regs.SP()) + dregs[arm64DwarfBPRegNum] = op.DwarfRegisterFromUint64(regs.BP()) + + for dwarfReg, asmReg := range arm64DwarfToHardware { + v, err := regs.Get(int(asmReg)) + if err == nil { + dregs[dwarfReg] = op.DwarfRegisterFromUint64(v) + } + } + + return op.DwarfRegisters{ + StaticBase: staticBase, + Regs: dregs, + ByteOrder: binary.LittleEndian, + PCRegNum: arm64DwarfIPRegNum, + SPRegNum: arm64DwarfSPRegNum, + BPRegNum: arm64DwarfBPRegNum, + } +} + +// AddrAndStackRegsToDwarfRegisters returns DWARF registers from the passed in +// PC, SP, and BP registers in the format used by the DWARF expression interpreter. +func (a *ARM64) AddrAndStackRegsToDwarfRegisters(staticBase, pc, sp, bp uint64) op.DwarfRegisters { + dregs := make([]*op.DwarfRegister, arm64DwarfIPRegNum+1) + dregs[arm64DwarfIPRegNum] = op.DwarfRegisterFromUint64(pc) + dregs[arm64DwarfSPRegNum] = op.DwarfRegisterFromUint64(sp) + dregs[arm64DwarfBPRegNum] = op.DwarfRegisterFromUint64(bp) + + return op.DwarfRegisters{ + StaticBase: staticBase, + Regs: dregs, + ByteOrder: binary.LittleEndian, + PCRegNum: arm64DwarfIPRegNum, + SPRegNum: arm64DwarfSPRegNum, + BPRegNum: arm64DwarfBPRegNum, + } +} diff --git a/pkg/proc/bininfo.go b/pkg/proc/bininfo.go index 6ec05bef..9345b0bc 100644 --- a/pkg/proc/bininfo.go +++ b/pkg/proc/bininfo.go @@ -98,7 +98,7 @@ type BinaryInfo struct { } // ErrUnsupportedLinuxArch is returned when attempting to debug a binary compiled for an unsupported architecture. 
-var ErrUnsupportedLinuxArch = errors.New("unsupported architecture - only linux/amd64 is supported") +var ErrUnsupportedLinuxArch = errors.New("unsupported architecture - only linux/amd64 and linux/arm64 are supported") // ErrUnsupportedWindowsArch is returned when attempting to debug a binary compiled for an unsupported architecture. var ErrUnsupportedWindowsArch = errors.New("unsupported architecture of windows/386 - only windows/amd64 is supported") @@ -262,6 +262,8 @@ func NewBinaryInfo(goos, goarch string) *BinaryInfo { switch goarch { case "amd64": r.Arch = AMD64Arch(goos) + case "arm64": + r.Arch = ARM64Arch(goos) } return r @@ -825,7 +827,7 @@ func (bi *BinaryInfo) openSeparateDebugInfo(image *Image, exe *elf.File, debugIn return nil, nil, fmt.Errorf("can't open separate debug file %q: %v", debugFilePath, err.Error()) } - if elfFile.Machine != elf.EM_X86_64 { + if elfFile.Machine != elf.EM_X86_64 && elfFile.Machine != elf.EM_AARCH64 { sepFile.Close() return nil, nil, fmt.Errorf("can't open separate debug file %q: %v", debugFilePath, ErrUnsupportedLinuxArch.Error()) } @@ -873,7 +875,7 @@ func loadBinaryInfoElf(bi *BinaryInfo, image *Image, path string, addr uint64, w if err != nil { return err } - if elfFile.Machine != elf.EM_X86_64 { + if elfFile.Machine != elf.EM_X86_64 && elfFile.Machine != elf.EM_AARCH64 { return ErrUnsupportedLinuxArch } diff --git a/pkg/proc/disasm.go b/pkg/proc/disasm.go index 44ceef62..181bf4f7 100644 --- a/pkg/proc/disasm.go +++ b/pkg/proc/disasm.go @@ -32,13 +32,14 @@ func Disassemble(mem MemoryReadWriter, regs Registers, breakpoints *BreakpointMa } func disassemble(memrw MemoryReadWriter, regs Registers, breakpoints *BreakpointMap, bi *BinaryInfo, startAddr, endAddr uint64, singleInstr bool) ([]AsmInstruction, error) { + minInstructionLength := bi.Arch.MinInstructionLength() mem := make([]byte, int(endAddr-startAddr)) _, err := memrw.ReadMemory(mem, uintptr(startAddr)) if err != nil { return nil, err } - r := 
make([]AsmInstruction, 0, len(mem)/15) + r := make([]AsmInstruction, 0, len(mem)/int(maxInstructionLength)) pc := startAddr var curpc uint64 @@ -58,15 +59,15 @@ func disassemble(memrw MemoryReadWriter, regs Registers, breakpoints *Breakpoint inst, err := asmDecode(mem, pc) if err == nil { atpc := (regs != nil) && (curpc == pc) - destloc := resolveCallArg(inst, atpc, regs, memrw, bi) - r = append(r, AsmInstruction{Loc: loc, DestLoc: destloc, Bytes: mem[:inst.Len], Breakpoint: atbp, AtPC: atpc, Inst: inst}) + destloc := resolveCallArg(inst, pc, atpc, regs, memrw, bi) + r = append(r, AsmInstruction{Loc: loc, DestLoc: destloc, Bytes: mem[:inst.Size()], Breakpoint: atbp, AtPC: atpc, Inst: inst}) pc += uint64(inst.Size()) mem = mem[inst.Size():] } else { - r = append(r, AsmInstruction{Loc: loc, Bytes: mem[:1], Breakpoint: atbp, Inst: nil}) - pc++ - mem = mem[1:] + r = append(r, AsmInstruction{Loc: loc, Bytes: mem[:minInstructionLength], Breakpoint: atbp, Inst: nil}) + pc += uint64(minInstructionLength) + mem = mem[minInstructionLength:] } if singleInstr { break diff --git a/pkg/proc/disasm_amd64.go b/pkg/proc/disasm_amd64.go index bc5ba2cb..fd382568 100644 --- a/pkg/proc/disasm_amd64.go +++ b/pkg/proc/disasm_amd64.go @@ -1,3 +1,7 @@ +// TODO: disassembler support should be compiled in unconditionally, +// instead of being decided by the build-target architecture, and be +// part of the Arch object instead. 
+ package proc import ( @@ -73,7 +77,7 @@ func (inst *AsmInstruction) IsRet() bool { return inst.Inst.Op == x86asm.RET || inst.Inst.Op == x86asm.LRET } -func resolveCallArg(inst *archInst, currentGoroutine bool, regs Registers, mem MemoryReadWriter, bininfo *BinaryInfo) *Location { +func resolveCallArg(inst *archInst, instAddr uint64, currentGoroutine bool, regs Registers, mem MemoryReadWriter, bininfo *BinaryInfo) *Location { if inst.Op != x86asm.CALL && inst.Op != x86asm.LCALL { return nil } diff --git a/pkg/proc/disasm_arm64.go b/pkg/proc/disasm_arm64.go new file mode 100644 index 00000000..39ac3720 --- /dev/null +++ b/pkg/proc/disasm_arm64.go @@ -0,0 +1,168 @@ +// TODO: disassembler support should be compiled in unconditionally, +// instead of being decided by the build-target architecture, and be +// part of the Arch object instead. + +package proc + +import ( + //"encoding/binary" + + "golang.org/x/arch/arm64/arm64asm" +) + +var maxInstructionLength uint64 = 4 + +type archInst arm64asm.Inst + +func asmDecode(mem []byte, pc uint64) (*archInst, error) { + inst, err := arm64asm.Decode(mem) + if err != nil { + return nil, err + } + + r := archInst(inst) + return &r, nil +} + +func (inst *archInst) Size() int { + return 4 +} + +// Text will return the assembly instructions in human readable format according to +// the flavour specified. +func (inst *AsmInstruction) Text(flavour AssemblyFlavour, bi *BinaryInfo) string { + if inst.Inst == nil { + return "?" + } + + var text string + + switch flavour { + case GNUFlavour: + text = arm64asm.GNUSyntax(arm64asm.Inst(*inst.Inst)) + default: + text = arm64asm.GoSyntax(arm64asm.Inst(*inst.Inst), inst.Loc.PC, bi.symLookup, nil) + } + + return text +} + +// IsCall returns true if the instruction is a BL or BLR instruction. 
+func (inst *AsmInstruction) IsCall() bool {
+	if inst.Inst == nil {
+		return false
+	}
+	return inst.Inst.Op == arm64asm.BL || inst.Inst.Op == arm64asm.BLR
+}
+
+// IsRet returns true if the instruction is a RET or ERET instruction.
+func (inst *AsmInstruction) IsRet() bool {
+	if inst.Inst == nil {
+		return false
+	}
+	return inst.Inst.Op == arm64asm.RET || inst.Inst.Op == arm64asm.ERET
+}
+
+func resolveCallArg(inst *archInst, instAddr uint64, currentGoroutine bool, regs Registers, mem MemoryReadWriter, bininfo *BinaryInfo) *Location {
+	if inst.Op != arm64asm.BL && inst.Op != arm64asm.BLR {
+		return nil
+	}
+
+	var pc uint64
+	var err error
+
+	switch arg := inst.Args[0].(type) {
+	case arm64asm.Imm:
+		pc = uint64(arg.Imm)
+	case arm64asm.Reg:
+		if !currentGoroutine || regs == nil {
+			return nil
+		}
+		pc, err = regs.Get(int(arg))
+		if err != nil {
+			return nil
+		}
+	case arm64asm.PCRel:
+		pc = uint64(instAddr) + uint64(arg)
+	default:
+		return nil
+	}
+
+	file, line, fn := bininfo.PCToLine(pc)
+	if fn == nil {
+		return &Location{PC: pc}
+	}
+	return &Location{PC: pc, File: file, Line: line, Fn: fn}
+}
+
+type instrseq []arm64asm.Op
+
+// Possible stacksplit prologues are inserted by stacksplit in
+// $GOROOT/src/cmd/internal/obj/arm64/obj7.go.
+// The stacksplit prologue begins by loading curg, emitted as either 1 or 2
+// MOVs. NOTE(review): this text was adapted from the amd64 version (which
+// loads curg into CX via load_g_cx) — confirm the exact arm64 sequence.
+var prologues []instrseq + +func init() { + var tinyStacksplit = instrseq{arm64asm.MOV, arm64asm.CMP, arm64asm.B} + var smallStacksplit = instrseq{arm64asm.SUB, arm64asm.CMP, arm64asm.B} + var bigStacksplit = instrseq{arm64asm.CMP, arm64asm.B, arm64asm.ADD, arm64asm.SUB, arm64asm.MOV, arm64asm.CMP, arm64asm.B} + var unixGetG = instrseq{arm64asm.MOV} + + prologues = make([]instrseq, 0, 3) + for _, getG := range []instrseq{unixGetG} { + for _, stacksplit := range []instrseq{tinyStacksplit, smallStacksplit, bigStacksplit} { + prologue := make(instrseq, 0, len(getG)+len(stacksplit)) + prologue = append(prologue, getG...) + prologue = append(prologue, stacksplit...) + prologues = append(prologues, prologue) + } + } +} + +// firstPCAfterPrologueDisassembly returns the address of the first +// instruction after the prologue for function fn by disassembling fn and +// matching the instructions against known split-stack prologue patterns. +// If sameline is set firstPCAfterPrologueDisassembly will always return an +// address associated with the same line as fn.Entry +func firstPCAfterPrologueDisassembly(p Process, fn *Function, sameline bool) (uint64, error) { + var mem MemoryReadWriter = p.CurrentThread() + breakpoints := p.Breakpoints() + bi := p.BinInfo() + text, err := disassemble(mem, nil, breakpoints, bi, fn.Entry, fn.End, false) + if err != nil { + return fn.Entry, err + } + + if len(text) <= 0 { + return fn.Entry, nil + } + + for _, prologue := range prologues { + if len(prologue) >= len(text) { + continue + } + if checkPrologue(text, prologue) { + r := &text[len(prologue)] + if sameline { + if r.Loc.Line != text[0].Loc.Line { + return fn.Entry, nil + } + } + return r.Loc.PC, nil + } + } + + return fn.Entry, nil +} + +func checkPrologue(s []AsmInstruction, prologuePattern instrseq) bool { + line := s[0].Loc.Line + for i, op := range prologuePattern { + if s[i].Inst.Op != op || s[i].Loc.Line != line { + return false + } + } + return true +} diff --git 
a/pkg/proc/linutil/regs.go b/pkg/proc/linutil/regs_amd64_arch.go
similarity index 100%
rename from pkg/proc/linutil/regs.go
rename to pkg/proc/linutil/regs_amd64_arch.go
diff --git a/pkg/proc/linutil/regs_arm64_arch.go b/pkg/proc/linutil/regs_arm64_arch.go
new file mode 100644
index 00000000..ad1ed502
--- /dev/null
+++ b/pkg/proc/linutil/regs_arm64_arch.go
@@ -0,0 +1,139 @@
+package linutil
+
+import (
+	"fmt"
+	"golang.org/x/arch/arm64/arm64asm"
+
+	"github.com/go-delve/delve/pkg/proc"
+)
+
+// ARM64Registers is a wrapper for the arm64 ptrace register state.
+type ARM64Registers struct {
+	Regs     *ARM64PtraceRegs //general-purpose registers
+	Fpregs   []proc.Register  //Formatted floating point registers
+	Fpregset []byte           //holding all floating point register values
+}
+
+// ARM64PtraceRegs is the struct used by the linux kernel to return the
+// general purpose registers for ARM64 CPUs.
+// Copied from sys/unix/ztypes_linux_arm64.go:735.
+type ARM64PtraceRegs struct {
+	Regs   [31]uint64
+	Sp     uint64
+	Pc     uint64
+	Pstate uint64
+}
+
+// Slice returns the registers as a list of (name, value) pairs.
+func (r *ARM64Registers) Slice(floatingPoint bool) []proc.Register {
+	var regs64 = []struct {
+		k string
+		v uint64
+	}{
+		{"X0", r.Regs.Regs[0]},
+		{"X1", r.Regs.Regs[1]},
+		{"X2", r.Regs.Regs[2]},
+		{"X3", r.Regs.Regs[3]},
+		{"X4", r.Regs.Regs[4]},
+		{"X5", r.Regs.Regs[5]},
+		{"X6", r.Regs.Regs[6]},
+		{"X7", r.Regs.Regs[7]},
+		{"X8", r.Regs.Regs[8]},
+		{"X9", r.Regs.Regs[9]},
+		{"X10", r.Regs.Regs[10]},
+		{"X11", r.Regs.Regs[11]},
+		{"X12", r.Regs.Regs[12]},
+		{"X13", r.Regs.Regs[13]},
+		{"X14", r.Regs.Regs[14]},
+		{"X15", r.Regs.Regs[15]},
+		{"X16", r.Regs.Regs[16]},
+		{"X17", r.Regs.Regs[17]},
+		{"X18", r.Regs.Regs[18]},
+		{"X19", r.Regs.Regs[19]},
+		{"X20", r.Regs.Regs[20]},
+		{"X21", r.Regs.Regs[21]},
+		{"X22", r.Regs.Regs[22]},
+		{"X23", r.Regs.Regs[23]},
+		{"X24", r.Regs.Regs[24]},
+		{"X25", r.Regs.Regs[25]},
+		{"X26", r.Regs.Regs[26]},
+		{"X27", r.Regs.Regs[27]},
+		{"X28", r.Regs.Regs[28]},
+		{"X29", r.Regs.Regs[29]},
+		{"X30", r.Regs.Regs[30]},
+		{"SP", r.Regs.Sp},
+		{"PC", r.Regs.Pc},
+		{"PSTATE", r.Regs.Pstate},
+	}
+	out := make([]proc.Register, 0, len(regs64)+len(r.Fpregs))
+	for _, reg := range regs64 {
+		out = proc.AppendQwordReg(out, reg.k, reg.v)
+	}
+	out = append(out, r.Fpregs...)
+	return out
+}
+
+// PC returns the value of the PC register.
+func (r *ARM64Registers) PC() uint64 {
+	return r.Regs.Pc
+}
+
+// SP returns the value of the SP register.
+func (r *ARM64Registers) SP() uint64 {
+	return r.Regs.Sp
+}
+
+func (r *ARM64Registers) BP() uint64 {
+	return r.Regs.Regs[29]
+}
+
+// CX has no arm64 equivalent and always returns 0.
+func (r *ARM64Registers) CX() uint64 {
+	return 0
+}
+
+// TLS returns the address of the thread local storage memory segment.
+func (r *ARM64Registers) TLS() uint64 {
+	return 0
+}
+
+// GAddr returns the address of the G variable if it is known, 0 and false
+// otherwise.
+func (r *ARM64Registers) GAddr() (uint64, bool) {
+	return r.Regs.Regs[28], true
+}
+
+// Get returns the value of the n-th register (in arm64asm order).
+func (r *ARM64Registers) Get(n int) (uint64, error) {
+	reg := arm64asm.Reg(n)
+
+	if reg >= arm64asm.X0 && reg <= arm64asm.X30 {
+		return r.Regs.Regs[reg-arm64asm.X0], nil
+	}
+
+	return 0, proc.ErrUnknownRegister
+}
+
+// Copy returns a copy of these registers that is guaranteed not to change.
+func (r *ARM64Registers) Copy() proc.Registers {
+	var rr ARM64Registers
+	rr.Regs = &ARM64PtraceRegs{}
+	*(rr.Regs) = *(r.Regs)
+	if r.Fpregs != nil {
+		rr.Fpregs = make([]proc.Register, len(r.Fpregs))
+		copy(rr.Fpregs, r.Fpregs)
+	}
+	if r.Fpregset != nil {
+		rr.Fpregset = make([]byte, len(r.Fpregset))
+		copy(rr.Fpregset, r.Fpregset)
+	}
+	return &rr
+}
+
+// Decode decodes an arm64 FP register area (16 bytes per V register) to a list of name/value pairs of registers.
+func Decode(fpregs []byte) (regs []proc.Register) {
+	for i := 0; i < len(fpregs); i += 16 {
+		regs = proc.AppendFPReg(regs, fmt.Sprintf("V%d", i/16), fpregs[i:i+16])
+	}
+	return
+}
diff --git a/pkg/proc/native/registers_linux_arm64.go b/pkg/proc/native/registers_linux_arm64.go
new file mode 100644
index 00000000..f7fea0c2
--- /dev/null
+++ b/pkg/proc/native/registers_linux_arm64.go
@@ -0,0 +1,103 @@
+package native
+
+import (
+	"debug/elf"
+	"fmt"
+	"syscall"
+	"unsafe"
+
+	sys "golang.org/x/sys/unix"
+
+	"github.com/go-delve/delve/pkg/proc"
+	"github.com/go-delve/delve/pkg/proc/linutil"
+)
+
+const (
+	AARCH64_GREGS_SIZE  = 34 * 8
+	AARCH64_FPREGS_SIZE = 32*16 + 8
+)
+
+func ptraceGetGRegs(pid int, regs *linutil.ARM64PtraceRegs) (err error) {
+	iov := sys.Iovec{Base: (*byte)(unsafe.Pointer(regs)), Len: AARCH64_GREGS_SIZE}
+	_, _, err = syscall.Syscall6(syscall.SYS_PTRACE, sys.PTRACE_GETREGSET, uintptr(pid), uintptr(elf.NT_PRSTATUS), uintptr(unsafe.Pointer(&iov)), 0, 0)
+	if err == syscall.Errno(0) {
+		err = nil
+	}
+	return
+}
+
+func ptraceSetGRegs(pid int, regs *linutil.ARM64PtraceRegs) (err error) {
+	iov := sys.Iovec{Base: (*byte)(unsafe.Pointer(regs)), Len: AARCH64_GREGS_SIZE}
+	_, _, err = syscall.Syscall6(syscall.SYS_PTRACE, 
sys.PTRACE_SETREGSET, uintptr(pid), uintptr(elf.NT_PRSTATUS), uintptr(unsafe.Pointer(&iov)), 0, 0) + if err == syscall.Errno(0) { + err = nil + } + return +} + +// PtraceGetRegset returns floating point registers of the specified thread +// using PTRACE. +func PtraceGetFpRegset(tid int) (fpregset []byte, err error) { + var arm64_fpregs [AARCH64_FPREGS_SIZE]byte + iov := sys.Iovec{Base: &arm64_fpregs[0], Len: AARCH64_FPREGS_SIZE} + _, _, err = syscall.Syscall6(syscall.SYS_PTRACE, sys.PTRACE_GETREGSET, uintptr(tid), uintptr(elf.NT_FPREGSET), uintptr(unsafe.Pointer(&iov)), 0, 0) + if err != syscall.Errno(0) { + if err == syscall.ENODEV { + err = nil + } + return + } else { + err = nil + } + + fpregset = arm64_fpregs[:iov.Len-8] + return fpregset, err +} + +// SetPC sets PC to the value specified by 'pc'. +func (thread *Thread) SetPC(pc uint64) error { + ir, err := registers(thread, false) + if err != nil { + return err + } + r := ir.(*linutil.ARM64Registers) + r.Regs.Pc = pc + thread.dbp.execPtraceFunc(func() { err = ptraceSetGRegs(thread.ID, r.Regs) }) + return err +} + +// SetSP sets RSP to the value specified by 'sp' +func (thread *Thread) SetSP(sp uint64) (err error) { + var ir proc.Registers + ir, err = registers(thread, false) + if err != nil { + return err + } + r := ir.(*linutil.ARM64Registers) + r.Regs.Sp = sp + thread.dbp.execPtraceFunc(func() { err = ptraceSetGRegs(thread.ID, r.Regs) }) + return +} + +func (thread *Thread) SetDX(dx uint64) (err error) { + return fmt.Errorf("not supported") +} + +func registers(thread *Thread, floatingPoint bool) (proc.Registers, error) { + var ( + regs linutil.ARM64PtraceRegs + err error + ) + thread.dbp.execPtraceFunc(func() { err = ptraceGetGRegs(thread.ID, ®s) }) + if err != nil { + return nil, err + } + r := &linutil.ARM64Registers{®s, nil, nil} + if floatingPoint { + r.Fpregs, r.Fpregset, err = thread.fpRegisters() + if err != nil { + return nil, err + } + } + return r, nil +} diff --git 
a/pkg/proc/native/threads_linux_arm64.go b/pkg/proc/native/threads_linux_arm64.go new file mode 100644 index 00000000..d07bd8c4 --- /dev/null +++ b/pkg/proc/native/threads_linux_arm64.go @@ -0,0 +1,43 @@ +package native + +import ( + "debug/elf" + "fmt" + "syscall" + "unsafe" + + sys "golang.org/x/sys/unix" + + "github.com/go-delve/delve/pkg/proc" + "github.com/go-delve/delve/pkg/proc/linutil" +) + +func (thread *Thread) fpRegisters() (fpregs []proc.Register, fpregset []byte, err error) { + thread.dbp.execPtraceFunc(func() { fpregset, err = PtraceGetFpRegset(thread.ID) }) + fpregs = linutil.Decode(fpregset) + if err != nil { + err = fmt.Errorf("could not get floating point registers: %v", err.Error()) + } + return +} + +func (t *Thread) restoreRegisters(savedRegs proc.Registers) error { + sr := savedRegs.(*linutil.ARM64Registers) + + var restoreRegistersErr error + t.dbp.execPtraceFunc(func() { + restoreRegistersErr = ptraceSetGRegs(t.ID, sr.Regs) + if restoreRegistersErr != syscall.Errno(0) { + return + } + if sr.Fpregset != nil { + iov := sys.Iovec{Base: &sr.Fpregset[0], Len: uint64(len(sr.Fpregset))} + _, _, restoreRegistersErr = syscall.Syscall6(syscall.SYS_PTRACE, sys.PTRACE_SETREGSET, uintptr(t.ID), uintptr(elf.NT_FPREGSET), uintptr(unsafe.Pointer(&iov)), 0, 0) + } + //return + }) + if restoreRegistersErr == syscall.Errno(0) { + restoreRegistersErr = nil + } + return restoreRegistersErr +} diff --git a/pkg/proc/registers.go b/pkg/proc/registers.go index 77765239..c6fa1390 100644 --- a/pkg/proc/registers.go +++ b/pkg/proc/registers.go @@ -170,6 +170,43 @@ func AppendSSEReg(regs []Register, name string, xmm []byte) []Register { return append(regs, Register{name, xmm, out.String()}) } +// AppendFPReg appends a 128 bit FP register to regs. 
+func AppendFPReg(regs []Register, name string, reg_value []byte) []Register { + buf := bytes.NewReader(reg_value) + + var out bytes.Buffer + var vi [16]uint8 + for i := range vi { + binary.Read(buf, binary.LittleEndian, &vi[i]) + } + + fmt.Fprintf(&out, "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", vi[15], vi[14], vi[13], vi[12], vi[11], vi[10], vi[9], vi[8], vi[7], vi[6], vi[5], vi[4], vi[3], vi[2], vi[1], vi[0]) + + fmt.Fprintf(&out, "\tv2_int={ %02x%02x%02x%02x%02x%02x%02x%02x %02x%02x%02x%02x%02x%02x%02x%02x }", vi[7], vi[6], vi[5], vi[4], vi[3], vi[2], vi[1], vi[0], vi[15], vi[14], vi[13], vi[12], vi[11], vi[10], vi[9], vi[8]) + + fmt.Fprintf(&out, "\tv4_int={ %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x }", vi[3], vi[2], vi[1], vi[0], vi[7], vi[6], vi[5], vi[4], vi[11], vi[10], vi[9], vi[8], vi[15], vi[14], vi[13], vi[12]) + + fmt.Fprintf(&out, "\tv8_int={ %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x }", vi[1], vi[0], vi[3], vi[2], vi[5], vi[4], vi[7], vi[6], vi[9], vi[8], vi[11], vi[10], vi[13], vi[12], vi[15], vi[14]) + + fmt.Fprintf(&out, "\tv16_int={ %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x }", vi[0], vi[1], vi[2], vi[3], vi[4], vi[5], vi[6], vi[7], vi[8], vi[9], vi[10], vi[11], vi[12], vi[13], vi[14], vi[15]) + + buf.Seek(0, os.SEEK_SET) + var v2 [2]float64 + for i := range v2 { + binary.Read(buf, binary.LittleEndian, &v2[i]) + } + fmt.Fprintf(&out, "\tv2_float={ %g %g }", v2[0], v2[1]) + + buf.Seek(0, os.SEEK_SET) + var v4 [4]float32 + for i := range v4 { + binary.Read(buf, binary.LittleEndian, &v4[i]) + } + fmt.Fprintf(&out, "\tv4_float={ %g %g %g %g }", v4[0], v4[1], v4[2], v4[3]) + + return append(regs, Register{name, reg_value, out.String()}) +} + // ErrUnknownRegister is returned when the value of an unknown // register is requested. 
var ErrUnknownRegister = errors.New("unknown register") diff --git a/pkg/proc/registers_amd64.go b/pkg/proc/registers_amd64.go deleted file mode 100644 index 8ef4658a..00000000 --- a/pkg/proc/registers_amd64.go +++ /dev/null @@ -1,90 +0,0 @@ -package proc - -import ( - "bytes" - "encoding/binary" - - "golang.org/x/arch/x86/x86asm" -) - -var dwarfToAsm = map[int]x86asm.Reg{ - 0: x86asm.RAX, - 1: x86asm.RDX, - 2: x86asm.RCX, - 3: x86asm.RBX, - 4: x86asm.RSI, - 5: x86asm.RDI, - 6: x86asm.RBP, - 7: x86asm.RSP, - 8: x86asm.R8, - 9: x86asm.R9, - 10: x86asm.R10, - 11: x86asm.R11, - 12: x86asm.R12, - 13: x86asm.R13, - 14: x86asm.R14, - 15: x86asm.R15, - 16: x86asm.RIP, -} - -var dwarfToName = map[int]string{ - 17: "XMM0", - 18: "XMM1", - 19: "XMM2", - 20: "XMM3", - 21: "XMM4", - 22: "XMM5", - 23: "XMM6", - 24: "XMM7", - 25: "XMM8", - 26: "XMM9", - 27: "XMM10", - 28: "XMM11", - 29: "XMM12", - 30: "XMM13", - 31: "XMM14", - 32: "XMM15", - 33: "ST(0)", - 34: "ST(1)", - 35: "ST(2)", - 36: "ST(3)", - 37: "ST(4)", - 38: "ST(5)", - 39: "ST(6)", - 40: "ST(7)", - 49: "Eflags", - 50: "Es", - 51: "Cs", - 52: "Ss", - 53: "Ds", - 54: "Fs", - 55: "Gs", - 58: "Fs_base", - 59: "Gs_base", - 64: "MXCSR", - 65: "CW", - 66: "SW", -} - -// GetDwarfRegister maps between DWARF register numbers and architecture -// registers. 
-// The mapping is specified in the System V ABI AMD64 Architecture Processor -// Supplement page 57, figure 3.36 -// https://www.uclibc.org/docs/psABI-x86_64.pdf -func GetDwarfRegister(regs Registers, i int) []byte { - if asmreg, ok := dwarfToAsm[i]; ok { - x, _ := regs.Get(int(asmreg)) - var buf bytes.Buffer - binary.Write(&buf, binary.LittleEndian, x) - return buf.Bytes() - } - if regname, ok := dwarfToName[i]; ok { - regslice := regs.Slice(true) - for _, reg := range regslice { - if reg.Name == regname { - return reg.Bytes - } - } - } - return []byte{} -} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/arg.go b/vendor/golang.org/x/arch/arm64/arm64asm/arg.go new file mode 100755 index 00000000..96df14df --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/arg.go @@ -0,0 +1,494 @@ +// Generated by ARM internal tool +// DO NOT EDIT + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +// Naming for Go decoder arguments: +// +// - arg_Wd: a W register encoded in the Rd[4:0] field (31 is wzr) +// +// - arg_Xd: a X register encoded in the Rd[4:0] field (31 is xzr) +// +// - arg_Wds: a W register encoded in the Rd[4:0] field (31 is wsp) +// +// - arg_Xds: a X register encoded in the Rd[4:0] field (31 is sp) +// +// - arg_Wn: encoded in Rn[9:5] +// +// - arg_Wm: encoded in Rm[20:16] +// +// - arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: +// a W register encoded in Rm with an extend encoded in option[15:13] and an amount +// encoded in imm3[12:10] in the range [0,4]. +// +// - arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: +// a W or X register encoded in Rm with an extend encoded in option[15:13] and an +// amount encoded in imm3[12:10] in the range [0,4]. If the extend is UXTX or SXTX, +// it's an X register else, it's a W register. 
+// +// - arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31: +// a W register encoded in Rm with a shift encoded in shift[23:22] and an amount +// encoded in imm6[15:10] in the range [0,31]. +// +// - arg_IAddSub: +// An immediate for a add/sub instruction encoded in imm12[21:10] with an optional +// left shift of 12 encoded in shift[23:22]. +// +// - arg_Rt_31_1__W_0__X_1: +// a W or X register encoded in Rt[4:0]. The width specifier is encoded in the field +// [31:31] (offset 31, bit count 1) and the register is W for 0 and X for 1. +// +// - arg_[s|u]label_FIELDS_POWER: +// a program label encoded as "FIELDS" times 2^POWER in the range [MIN, MAX] (determined +// by signd/unsigned, FIELDS and POWER), e.g. +// arg_slabel_imm14_2 +// arg_slabel_imm19_2 +// arg_slabel_imm26_2 +// arg_slabel_immhi_immlo_0 +// arg_slabel_immhi_immlo_12 +// +// - arg_Xns_mem_post_imm7_8_signed: +// addressing mode of post-index with a base register: Xns and a signed offset encoded +// in the "imm7" field times 8 +// +// - arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1: +// addressing mode of extended register with a base register: Xns, an offset register +// (|) with an extend encoded in option[15:13] and a shift amount encoded in +// S[12:12] in the range [0,3] (S=0:0, S=1:3). +// +// - arg_Xns_mem_optional_imm12_4_unsigned: +// addressing mode of unsigned offset with a base register: Xns and an optional unsigned +// offset encoded in the "imm12" field times 4 +// +// - arg_Xns_mem_wb_imm7_4_signed: +// addressing mode of pre-index with a base register: Xns and the signed offset encoded +// in the "imm7" field times 4 +// +// - arg_Xns_mem_post_size_1_8_unsigned__4_0__8_1__16_2__32_3: +// a post-index immediate offset, encoded in the "size" field. 
It can have the following values: +// #4 when size = 00 +// #8 when size = 01 +// #16 when size = 10 +// #32 when size = 11 +// +// - arg_immediate_0_127_CRm_op2: +// an immediate encoded in "CRm:op2" in the range 0 to 127 +// +// - arg_immediate_bitmask_64_N_imms_immr: +// a bitmask immediate for 64-bit variant and encoded in "N:imms:immr" +// +// - arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms: +// an immediate for the bitfield of SBFX 64-bit variant +// +// - arg_immediate_shift_32_implicit_inverse_imm16_hw: +// a 32-bit immediate of the bitwise inverse of which can be encoded in "imm16:hw" +// +// - arg_cond_[Not]AllowALNV_[Invert|Normal]: +// a standard condition, encoded in the "cond" field, excluding (NotAllow) AL and NV with +// its least significant bit [Yes|No] inverted, e.g. +// arg_cond_AllowALNV_Normal +// arg_cond_NotAllowALNV_Invert +// +// - arg_immediate_OptLSL_amount_16_0_48: +// An immediate for MOV[KNZ] instruction encoded in imm16[20:5] with an optional +// left shift of 16 in the range [0, 48] encoded in hw[22, 21] +// +// - arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: +// the left shift amount, in the range 0 to the operand width in bits minus 1, +// encoded in the "immh:immb" field. It can have the following values: +// (UInt(immh:immb)-8) when immh = 0001 +// (UInt(immh:immb)-16) when immh = 001x +// (UInt(immh:immb)-32) when immh = 01xx +// (UInt(immh:immb)-64) when immh = 1xxx +// +// - arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: +// the right shift amount, in the range 1 to the destination operand width in +// bits, encoded in the "immh:immb" field. 
It can have the following values: +// (16-UInt(immh:immb)) when immh = 0001 +// (32-UInt(immh:immb)) when immh = 001x +// (64-UInt(immh:immb)) when immh = 01xx +// +// - arg_immediate_8x8_a_b_c_d_e_f_g_h: +// a 64-bit immediate 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh', +// encoded in "a:b:c:d:e:f:g:h". +// +// - arg_immediate_fbits_min_1_max_32_sub_64_scale: +// the number of bits after the binary point in the fixed-point destination, +// in the range 1 to 32, encoded as 64 minus "scale". +// +// - arg_immediate_floatzero: #0.0 +// +// - arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h: +// a signed floating-point constant with 3-bit exponent and normalized 4 bits of precision, +// encoded in "a:b:c:d:e:f:g:h" +// +// - arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8: +// the number of fractional bits, in the range 1 to the operand width, encoded +// in the "immh:immb" field. It can have the following values: +// (64-UInt(immh:immb)) when immh = 01xx +// (128-UInt(immh:immb)) when immh = 1xxx +// +// - arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10: +// the lowest numbered byte element to be extracted, encoded in the "Q:imm4" field. 
+// It can have the following values: +// imm4<2:0> when Q = 0, imm4<3> = 0 +// imm4 when Q = 1, imm4<3> = x +// +// - arg_sysop_AT_SYS_CR_system: +// system operation for system instruction: AT encoded in the "op1:CRm<0>:op2" field +// +// - arg_prfop_Rt: +// prefectch operation encoded in the "Rt" +// +// - arg_sysreg_o0_op1_CRn_CRm_op2: +// system register name encoded in the "o0:op1:CRn:CRm:op2" +// +// - arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37: +// PSTATE field name encoded in the "op1:op2" field +// +// - arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: +// one register with arrangement specifier encoded in the "size:Q" field which can have the following values: +// 8B when size = 00, Q = 0 +// 16B when size = 00, Q = 1 +// 4H when size = 01, Q = 0 +// 8H when size = 01, Q = 1 +// 2S when size = 10, Q = 0 +// 4S when size = 10, Q = 1 +// 2D when size = 11, Q = 1 +// The encoding size = 11, Q = 0 is reserved. +// +// - arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: +// three registers with arrangement specifier encoded in the "size:Q" field which can have the following values: +// 8B when size = 00, Q = 0 +// 16B when size = 00, Q = 1 +// 4H when size = 01, Q = 0 +// 8H when size = 01, Q = 1 +// 2S when size = 10, Q = 0 +// 4S when size = 10, Q = 1 +// 2D when size = 11, Q = 1 +// The encoding size = 11, Q = 0 is reserved. +// +// - arg_Vt_1_arrangement_H_index__Q_S_size_1: +// one register with arrangement:H and element index encoded in "Q:S:size<1>". 
+ +type instArg uint16 + +const ( + _ instArg = iota + arg_Bt + arg_Cm + arg_Cn + arg_cond_AllowALNV_Normal + arg_conditional + arg_cond_NotAllowALNV_Invert + arg_Da + arg_Dd + arg_Dm + arg_Dn + arg_Dt + arg_Dt2 + arg_Hd + arg_Hn + arg_Ht + arg_IAddSub + arg_immediate_0_127_CRm_op2 + arg_immediate_0_15_CRm + arg_immediate_0_15_nzcv + arg_immediate_0_31_imm5 + arg_immediate_0_31_immr + arg_immediate_0_31_imms + arg_immediate_0_63_b5_b40 + arg_immediate_0_63_immh_immb__UIntimmhimmb64_8 + arg_immediate_0_63_immr + arg_immediate_0_63_imms + arg_immediate_0_65535_imm16 + arg_immediate_0_7_op1 + arg_immediate_0_7_op2 + arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4 + arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8 + arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8 + arg_immediate_0_width_size__8_0__16_1__32_2 + arg_immediate_1_64_immh_immb__128UIntimmhimmb_8 + arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4 + arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4 + arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_8x8_a_b_c_d_e_f_g_h + arg_immediate_ASR_SBFM_32M_bitfield_0_31_immr + arg_immediate_ASR_SBFM_64M_bitfield_0_63_immr + arg_immediate_BFI_BFM_32M_bitfield_lsb_32_immr + arg_immediate_BFI_BFM_32M_bitfield_width_32_imms + arg_immediate_BFI_BFM_64M_bitfield_lsb_64_immr + arg_immediate_BFI_BFM_64M_bitfield_width_64_imms + arg_immediate_BFXIL_BFM_32M_bitfield_lsb_32_immr + arg_immediate_BFXIL_BFM_32M_bitfield_width_32_imms + arg_immediate_BFXIL_BFM_64M_bitfield_lsb_64_immr + arg_immediate_BFXIL_BFM_64M_bitfield_width_64_imms + 
arg_immediate_bitmask_32_imms_immr + arg_immediate_bitmask_64_N_imms_immr + arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h + arg_immediate_exp_3_pre_4_imm8 + arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_fbits_min_1_max_32_sub_64_scale + arg_immediate_fbits_min_1_max_64_sub_64_scale + arg_immediate_floatzero + arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10 + arg_immediate_LSL_UBFM_32M_bitfield_0_31_immr + arg_immediate_LSL_UBFM_64M_bitfield_0_63_immr + arg_immediate_LSR_UBFM_32M_bitfield_0_31_immr + arg_immediate_LSR_UBFM_64M_bitfield_0_63_immr + arg_immediate_MSL__a_b_c_d_e_f_g_h_cmode__8_0__16_1 + arg_immediate_optional_0_15_CRm + arg_immediate_optional_0_65535_imm16 + arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1 + arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1__16_2__24_3 + arg_immediate_OptLSL_amount_16_0_16 + arg_immediate_OptLSL_amount_16_0_48 + arg_immediate_OptLSLZero__a_b_c_d_e_f_g_h + arg_immediate_SBFIZ_SBFM_32M_bitfield_lsb_32_immr + arg_immediate_SBFIZ_SBFM_32M_bitfield_width_32_imms + arg_immediate_SBFIZ_SBFM_64M_bitfield_lsb_64_immr + arg_immediate_SBFIZ_SBFM_64M_bitfield_width_64_imms + arg_immediate_SBFX_SBFM_32M_bitfield_lsb_32_immr + arg_immediate_SBFX_SBFM_32M_bitfield_width_32_imms + arg_immediate_SBFX_SBFM_64M_bitfield_lsb_64_immr + arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms + arg_immediate_shift_32_implicit_imm16_hw + arg_immediate_shift_32_implicit_inverse_imm16_hw + arg_immediate_shift_64_implicit_imm16_hw + arg_immediate_shift_64_implicit_inverse_imm16_hw + arg_immediate_UBFIZ_UBFM_32M_bitfield_lsb_32_immr + arg_immediate_UBFIZ_UBFM_32M_bitfield_width_32_imms + arg_immediate_UBFIZ_UBFM_64M_bitfield_lsb_64_immr + arg_immediate_UBFIZ_UBFM_64M_bitfield_width_64_imms + arg_immediate_UBFX_UBFM_32M_bitfield_lsb_32_immr + 
arg_immediate_UBFX_UBFM_32M_bitfield_width_32_imms + arg_immediate_UBFX_UBFM_64M_bitfield_lsb_64_immr + arg_immediate_UBFX_UBFM_64M_bitfield_width_64_imms + arg_immediate_zero + arg_option_DMB_BO_system_CRm + arg_option_DSB_BO_system_CRm + arg_option_ISB_BI_system_CRm + arg_prfop_Rt + arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37 + arg_Qd + arg_Qn + arg_Qt + arg_Qt2 + arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4 + arg_Rn_16_5__W_1__W_2__W_4__X_8 + arg_Rt_31_1__W_0__X_1 + arg_Sa + arg_Sd + arg_slabel_imm14_2 + arg_slabel_imm19_2 + arg_slabel_imm26_2 + arg_slabel_immhi_immlo_0 + arg_slabel_immhi_immlo_12 + arg_Sm + arg_Sn + arg_St + arg_St2 + arg_sysop_AT_SYS_CR_system + arg_sysop_DC_SYS_CR_system + arg_sysop_IC_SYS_CR_system + arg_sysop_SYS_CR_system + arg_sysop_TLBI_SYS_CR_system + arg_sysreg_o0_op1_CRn_CRm_op2 + arg_Vd_16_5__B_1__H_2__S_4__D_8 + arg_Vd_19_4__B_1__H_2__S_4 + arg_Vd_19_4__B_1__H_2__S_4__D_8 + arg_Vd_19_4__D_8 + arg_Vd_19_4__S_4__D_8 + arg_Vd_22_1__S_0 + arg_Vd_22_1__S_0__D_1 + arg_Vd_22_1__S_1 + arg_Vd_22_2__B_0__H_1__S_2 + arg_Vd_22_2__B_0__H_1__S_2__D_3 + arg_Vd_22_2__D_3 + arg_Vd_22_2__H_0__S_1__D_2 + arg_Vd_22_2__H_1__S_2 + arg_Vd_22_2__S_1__D_2 + arg_Vd_arrangement_16B + arg_Vd_arrangement_2D + arg_Vd_arrangement_4S + arg_Vd_arrangement_D_index__1 + arg_Vd_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1 + arg_Vd_arrangement_imm5_Q___8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4 + arg_Vd_arrangement_Q___2S_0__4S_1 + 
arg_Vd_arrangement_Q___4H_0__8H_1 + arg_Vd_arrangement_Q___8B_0__16B_1 + arg_Vd_arrangement_Q_sz___2S_00__4S_10__2D_11 + arg_Vd_arrangement_size___4S_1__2D_2 + arg_Vd_arrangement_size___8H_0__1Q_3 + arg_Vd_arrangement_size___8H_0__4S_1__2D_2 + arg_Vd_arrangement_size_Q___4H_00__8H_01__2S_10__4S_11__1D_20__2D_21 + arg_Vd_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vd_arrangement_size_Q___8B_00__16B_01 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vd_arrangement_sz___4S_0__2D_1 + arg_Vd_arrangement_sz_Q___2S_00__4S_01 + arg_Vd_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vd_arrangement_sz_Q___2S_10__4S_11 + arg_Vd_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11 + arg_Vm_22_1__S_0__D_1 + arg_Vm_22_2__B_0__H_1__S_2__D_3 + arg_Vm_22_2__D_3 + arg_Vm_22_2__H_1__S_2 + arg_Vm_arrangement_4S + arg_Vm_arrangement_Q___8B_0__16B_1 + arg_Vm_arrangement_size___8H_0__4S_1__2D_2 + arg_Vm_arrangement_size___H_1__S_2_index__size_L_H_M__HLM_1__HL_2_1 + arg_Vm_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vm_arrangement_size_Q___8B_00__16B_01 + arg_Vm_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31 + arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vm_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vm_arrangement_sz___S_0__D_1_index__sz_L_H__HL_00__H_10_1 + arg_Vn_19_4__B_1__H_2__S_4__D_8 + arg_Vn_19_4__D_8 + arg_Vn_19_4__H_1__S_2__D_4 + arg_Vn_19_4__S_4__D_8 + arg_Vn_1_arrangement_16B + arg_Vn_22_1__D_1 + arg_Vn_22_1__S_0__D_1 + arg_Vn_22_2__B_0__H_1__S_2__D_3 + arg_Vn_22_2__D_3 + arg_Vn_22_2__H_0__S_1__D_2 + arg_Vn_22_2__H_1__S_2 + arg_Vn_2_arrangement_16B + arg_Vn_3_arrangement_16B + arg_Vn_4_arrangement_16B + arg_Vn_arrangement_16B + arg_Vn_arrangement_4S + arg_Vn_arrangement_D_index__1 + 
arg_Vn_arrangement_D_index__imm5_1 + arg_Vn_arrangement_imm5___B_1__H_2_index__imm5__imm5lt41gt_1__imm5lt42gt_2_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5_imm4__imm4lt30gt_1__imm4lt31gt_2__imm4lt32gt_4__imm4lt3gt_8_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1 + arg_Vn_arrangement_imm5___D_8_index__imm5_1 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vn_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4 + arg_Vn_arrangement_Q___8B_0__16B_1 + arg_Vn_arrangement_Q_sz___2S_00__4S_10__2D_11 + arg_Vn_arrangement_Q_sz___4S_10 + arg_Vn_arrangement_S_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1 + arg_Vn_arrangement_size___2D_3 + arg_Vn_arrangement_size___8H_0__4S_1__2D_2 + arg_Vn_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vn_arrangement_size_Q___8B_00__16B_01 + arg_Vn_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__4S_21 + arg_Vn_arrangement_sz___2D_1 + arg_Vn_arrangement_sz___2S_0__2D_1 + arg_Vn_arrangement_sz___4S_0__2D_1 + arg_Vn_arrangement_sz_Q___2S_00__4S_01 + arg_Vn_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vn_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11 + arg_Vt_1_arrangement_B_index__Q_S_size_1 + arg_Vt_1_arrangement_D_index__Q_1 + arg_Vt_1_arrangement_H_index__Q_S_size_1 + 
arg_Vt_1_arrangement_S_index__Q_S_1 + arg_Vt_1_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_2_arrangement_B_index__Q_S_size_1 + arg_Vt_2_arrangement_D_index__Q_1 + arg_Vt_2_arrangement_H_index__Q_S_size_1 + arg_Vt_2_arrangement_S_index__Q_S_1 + arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vt_3_arrangement_B_index__Q_S_size_1 + arg_Vt_3_arrangement_D_index__Q_1 + arg_Vt_3_arrangement_H_index__Q_S_size_1 + arg_Vt_3_arrangement_S_index__Q_S_1 + arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vt_4_arrangement_B_index__Q_S_size_1 + arg_Vt_4_arrangement_D_index__Q_1 + arg_Vt_4_arrangement_H_index__Q_S_size_1 + arg_Vt_4_arrangement_S_index__Q_S_1 + arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Wa + arg_Wd + arg_Wds + arg_Wm + arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4 + arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31 + arg_Wm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_31 + arg_Wn + arg_Wns + arg_Ws + arg_Wt + arg_Wt2 + arg_Xa + arg_Xd + arg_Xds + arg_Xm + arg_Xm_shift__LSL_0__LSR_1__ASR_2__0_63 + arg_Xm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_63 + arg_Xn + arg_Xns + arg_Xns_mem + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__1_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__2_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__4_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__absent_0__0_1 + arg_Xns_mem_offset + arg_Xns_mem_optional_imm12_16_unsigned + arg_Xns_mem_optional_imm12_1_unsigned + 
arg_Xns_mem_optional_imm12_2_unsigned + arg_Xns_mem_optional_imm12_4_unsigned + arg_Xns_mem_optional_imm12_8_unsigned + arg_Xns_mem_optional_imm7_16_signed + arg_Xns_mem_optional_imm7_4_signed + arg_Xns_mem_optional_imm7_8_signed + arg_Xns_mem_optional_imm9_1_signed + arg_Xns_mem_post_fixedimm_1 + arg_Xns_mem_post_fixedimm_12 + arg_Xns_mem_post_fixedimm_16 + arg_Xns_mem_post_fixedimm_2 + arg_Xns_mem_post_fixedimm_24 + arg_Xns_mem_post_fixedimm_3 + arg_Xns_mem_post_fixedimm_32 + arg_Xns_mem_post_fixedimm_4 + arg_Xns_mem_post_fixedimm_6 + arg_Xns_mem_post_fixedimm_8 + arg_Xns_mem_post_imm7_16_signed + arg_Xns_mem_post_imm7_4_signed + arg_Xns_mem_post_imm7_8_signed + arg_Xns_mem_post_imm9_1_signed + arg_Xns_mem_post_Q__16_0__32_1 + arg_Xns_mem_post_Q__24_0__48_1 + arg_Xns_mem_post_Q__32_0__64_1 + arg_Xns_mem_post_Q__8_0__16_1 + arg_Xns_mem_post_size__1_0__2_1__4_2__8_3 + arg_Xns_mem_post_size__2_0__4_1__8_2__16_3 + arg_Xns_mem_post_size__3_0__6_1__12_2__24_3 + arg_Xns_mem_post_size__4_0__8_1__16_2__32_3 + arg_Xns_mem_post_Xm + arg_Xns_mem_wb_imm7_16_signed + arg_Xns_mem_wb_imm7_4_signed + arg_Xns_mem_wb_imm7_8_signed + arg_Xns_mem_wb_imm9_1_signed + arg_Xs + arg_Xt + arg_Xt2 +) diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/condition.go b/vendor/golang.org/x/arch/arm64/arm64asm/condition.go new file mode 100755 index 00000000..d6738572 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/condition.go @@ -0,0 +1,329 @@ +// Generated by ARM internal tool +// DO NOT EDIT + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm64asm + +// Following functions are used as the predicator: canDecode of according instruction +// Refer to instFormat inside decode.go for more details + +func at_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, 0x8, (instr>>5)&0x7) == Sys_AT +} + +func bfi_bfm_32m_bitfield_cond(instr uint32) bool { + return (instr>>5)&0x1f != 0x1f && uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func bfi_bfm_64m_bitfield_cond(instr uint32) bool { + return (instr>>5)&0x1f != 0x1f && uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func bfxil_bfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) >= uint8((instr>>16)&0x3f) +} + +func bfxil_bfm_64m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) >= uint8((instr>>16)&0x3f) +} + +func cinc_csinc_32_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinc_csinc_64_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinv_csinv_32_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinv_csinv_64_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cneg_csneg_32_condsel_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cneg_csneg_64_condsel_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func csinc_general_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 +} +func csinv_general_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 +} +func 
dc_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_DC +} + +func ic_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_IC +} + +func lsl_ubfm_32m_bitfield_cond(instr uint32) bool { + return instr&0xfc00 != 0x7c00 && (instr>>10)&0x3f+1 == (instr>>16)&0x3f +} + +func lsl_ubfm_64m_bitfield_cond(instr uint32) bool { + return instr&0xfc00 != 0xfc00 && (instr>>10)&0x3f+1 == (instr>>16)&0x3f +} + +func mov_orr_32_log_imm_cond(instr uint32) bool { + return !move_wide_preferred_4((instr>>31)&0x1, (instr>>22)&0x1, (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func mov_orr_64_log_imm_cond(instr uint32) bool { + return !move_wide_preferred_4((instr>>31)&0x1, (instr>>22)&0x1, (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func mov_movn_32_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) && !is_ones_n16((instr>>5)&0xffff) +} + +func mov_movn_64_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func mov_add_32_addsub_imm_cond(instr uint32) bool { + return instr&0x1f == 0x1f || (instr>>5)&0x1f == 0x1f +} + +func mov_add_64_addsub_imm_cond(instr uint32) bool { + return instr&0x1f == 0x1f || (instr>>5)&0x1f == 0x1f +} + +func mov_movz_32_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func mov_movz_64_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func ror_extr_32_extract_cond(instr uint32) bool { + return (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func ror_extr_64_extract_cond(instr uint32) bool { + return (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func sbfiz_sbfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func sbfiz_sbfm_64m_bitfield_cond(instr uint32) bool { + return 
uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func sbfx_sbfm_32m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func sbfx_sbfm_64m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func tlbi_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x8, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_TLBI +} + +func ubfiz_ubfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func ubfiz_ubfm_64m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func ubfx_ubfm_32m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func ubfx_ubfm_64m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func fcvtzs_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzs_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzu_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzu_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func mov_umov_asimdins_w_w_cond(instr uint32) bool { + return ((instr>>16)&0x1f)&0x7 == 0x4 +} + +func mov_umov_asimdins_x_x_cond(instr uint32) bool { + return ((instr>>16)&0x1f)&0xf == 0x8 +} + +func mov_orr_asimdsame_only_cond(instr uint32) bool { + return (instr>>16)&0x1f == (instr>>5)&0x1f +} + +func rshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func scvtf_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func scvtf_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 
+} +func shl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func shl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func shrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sli_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sli_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrun_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrun_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshlu_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshlu_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrun_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrun_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sri_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sri_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srsra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srsra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sshll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} 
+func sshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ssra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ssra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sxtl_sshll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 && bit_count((instr>>19)&0xf) == 1 +} + +func ucvtf_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ucvtf_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqrshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqrshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func urshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func urshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ursra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ursra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func usra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func usra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uxtl_ushll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 && bit_count((instr>>19)&0xf) == 1 +} diff --git 
a/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go b/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go new file mode 100755 index 00000000..62c0c3b0 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go @@ -0,0 +1,81 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +func extract_bit(value, bit uint32) uint32 { + return (value >> bit) & 1 +} + +func bfxpreferred_4(sf, opc1, imms, immr uint32) bool { + if imms < immr { + return false + } + if (imms>>5 == sf) && (imms&0x1f == 0x1f) { + return false + } + if immr == 0 { + if sf == 0 && (imms == 7 || imms == 15) { + return false + } + if sf == 1 && opc1 == 0 && (imms == 7 || + imms == 15 || imms == 31) { + return false + } + } + return true +} + +func move_wide_preferred_4(sf, N, imms, immr uint32) bool { + if sf == 1 && N != 1 { + return false + } + if sf == 0 && !(N == 0 && ((imms>>5)&1) == 0) { + return false + } + if imms < 16 { + return (-immr)%16 <= (15 - imms) + } + width := uint32(32) + if sf == 1 { + width = uint32(64) + } + if imms >= (width - 15) { + return (immr % 16) <= (imms - (width - 15)) + } + return false +} + +type Sys uint8 + +const ( + Sys_AT Sys = iota + Sys_DC + Sys_IC + Sys_TLBI + Sys_SYS +) + +func sys_op_4(op1, crn, crm, op2 uint32) Sys { + // TODO: system instruction + return Sys_SYS +} + +func is_zero(x uint32) bool { + return x == 0 +} + +func is_ones_n16(x uint32) bool { + return x == 0xffff +} + +func bit_count(x uint32) uint8 { + var count uint8 + for count = 0; x > 0; x >>= 1 { + if (x & 1) == 1 { + count++ + } + } + return count +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/decode.go b/vendor/golang.org/x/arch/arm64/arm64asm/decode.go new file mode 100755 index 00000000..5e29c476 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/decode.go @@ -0,0 +1,2768 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +import ( + "encoding/binary" + "fmt" +) + +type instArgs [5]instArg + +// An instFormat describes the format of an instruction encoding. +// An instruction with 32-bit value x matches the format if x&mask == value +// and the predicator: canDecode(x) return true. +type instFormat struct { + mask uint32 + value uint32 + op Op + // args describe how to decode the instruction arguments. + // args is stored as a fixed-size array. + // if there are fewer than len(args) arguments, args[i] == 0 marks + // the end of the argument list. + args instArgs + canDecode func(instr uint32) bool +} + +var ( + errShort = fmt.Errorf("truncated instruction") + errUnknown = fmt.Errorf("unknown instruction") +) + +var decoderCover []bool + +func init() { + decoderCover = make([]bool, len(instFormats)) +} + +// Decode decodes the 4 bytes in src as a single instruction. +func Decode(src []byte) (inst Inst, err error) { + if len(src) < 4 { + return Inst{}, errShort + } + + x := binary.LittleEndian.Uint32(src) + +Search: + for i := range instFormats { + f := &instFormats[i] + if x&f.mask != f.value { + continue + } + if f.canDecode != nil && !f.canDecode(x) { + continue + } + // Decode args. + var args Args + for j, aop := range f.args { + if aop == 0 { + break + } + arg := decodeArg(aop, x) + if arg == nil { // Cannot decode argument + continue Search + } + args[j] = arg + } + decoderCover[i] = true + inst = Inst{ + Op: f.op, + Args: args, + Enc: x, + } + return inst, nil + } + return Inst{}, errUnknown +} + +// decodeArg decodes the arg described by aop from the instruction bits x. +// It returns nil if x cannot be decoded according to aop. 
+func decodeArg(aop instArg, x uint32) Arg { + switch aop { + default: + return nil + + case arg_Da: + return D0 + Reg((x>>10)&(1<<5-1)) + + case arg_Dd: + return D0 + Reg(x&(1<<5-1)) + + case arg_Dm: + return D0 + Reg((x>>16)&(1<<5-1)) + + case arg_Dn: + return D0 + Reg((x>>5)&(1<<5-1)) + + case arg_Hd: + return H0 + Reg(x&(1<<5-1)) + + case arg_Hn: + return H0 + Reg((x>>5)&(1<<5-1)) + + case arg_IAddSub: + imm12 := (x >> 10) & (1<<12 - 1) + shift := (x >> 22) & (1<<2 - 1) + if shift > 1 { + return nil + } + shift = shift * 12 + return ImmShift{uint16(imm12), uint8(shift)} + + case arg_Sa: + return S0 + Reg((x>>10)&(1<<5-1)) + + case arg_Sd: + return S0 + Reg(x&(1<<5-1)) + + case arg_Sm: + return S0 + Reg((x>>16)&(1<<5-1)) + + case arg_Sn: + return S0 + Reg((x>>5)&(1<<5-1)) + + case arg_Wa: + return W0 + Reg((x>>10)&(1<<5-1)) + + case arg_Wd: + return W0 + Reg(x&(1<<5-1)) + + case arg_Wds: + return RegSP(W0) + RegSP(x&(1<<5-1)) + + case arg_Wm: + return W0 + Reg((x>>16)&(1<<5-1)) + + case arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: + return handle_ExtendedRegister(x, true) + + case arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: + return handle_ExtendedRegister(x, false) + + case arg_Wn: + return W0 + Reg((x>>5)&(1<<5-1)) + + case arg_Wns: + return RegSP(W0) + RegSP((x>>5)&(1<<5-1)) + + case arg_Xa: + return X0 + Reg((x>>10)&(1<<5-1)) + + case arg_Xd: + return X0 + Reg(x&(1<<5-1)) + + case arg_Xds: + return RegSP(X0) + RegSP(x&(1<<5-1)) + + case arg_Xm: + return X0 + Reg((x>>16)&(1<<5-1)) + + case arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31: + return handle_ImmediateShiftedRegister(x, 31, true, false) + + case arg_Wm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_31: + return handle_ImmediateShiftedRegister(x, 31, true, true) + + case arg_Xm_shift__LSL_0__LSR_1__ASR_2__0_63: + return handle_ImmediateShiftedRegister(x, 63, false, false) + + case arg_Xm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_63: + 
return handle_ImmediateShiftedRegister(x, 63, false, true) + + case arg_Xn: + return X0 + Reg((x>>5)&(1<<5-1)) + + case arg_Xns: + return RegSP(X0) + RegSP((x>>5)&(1<<5-1)) + + case arg_slabel_imm14_2: + imm14 := ((x >> 5) & (1<<14 - 1)) + return PCRel(((int64(imm14) << 2) << 48) >> 48) + + case arg_slabel_imm19_2: + imm19 := ((x >> 5) & (1<<19 - 1)) + return PCRel(((int64(imm19) << 2) << 43) >> 43) + + case arg_slabel_imm26_2: + imm26 := (x & (1<<26 - 1)) + return PCRel(((int64(imm26) << 2) << 36) >> 36) + + case arg_slabel_immhi_immlo_0: + immhi := ((x >> 5) & (1<<19 - 1)) + immlo := ((x >> 29) & (1<<2 - 1)) + immhilo := (immhi)<<2 | immlo + return PCRel((int64(immhilo) << 43) >> 43) + + case arg_slabel_immhi_immlo_12: + immhi := ((x >> 5) & (1<<19 - 1)) + immlo := ((x >> 29) & (1<<2 - 1)) + immhilo := (immhi)<<2 | immlo + return PCRel(((int64(immhilo) << 12) << 31) >> 31) + + case arg_Xns_mem: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrOffset, 0} + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__1_1: + return handle_MemExtend(x, 1, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__2_1: + return handle_MemExtend(x, 2, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1: + return handle_MemExtend(x, 3, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__absent_0__0_1: + return handle_MemExtend(x, 1, true) + + case arg_Xns_mem_optional_imm12_1_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12)} + + case arg_Xns_mem_optional_imm12_2_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 1)} + + case arg_Xns_mem_optional_imm12_4_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 2)} + + case 
arg_Xns_mem_optional_imm12_8_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 3)} + + case arg_Xns_mem_optional_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_optional_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_optional_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrOffset, (int32(imm9) << 23) >> 23} + + case arg_Xns_mem_post_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_post_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_post_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm9)) << 23) >> 23} + + case arg_Xns_mem_wb_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_wb_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_wb_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm9)) << 23) >> 23} + + case arg_Ws: + return W0 + Reg((x>>16)&(1<<5-1)) + + case arg_Wt: + return W0 + Reg(x&(1<<5-1)) + + case arg_Wt2: + return W0 + 
Reg((x>>10)&(1<<5-1)) + + case arg_Xs: + return X0 + Reg((x>>16)&(1<<5-1)) + + case arg_Xt: + return X0 + Reg(x&(1<<5-1)) + + case arg_Xt2: + return X0 + Reg((x>>10)&(1<<5-1)) + + case arg_immediate_0_127_CRm_op2: + crm_op2 := (x >> 5) & (1<<7 - 1) + return Imm_hint(crm_op2) + + case arg_immediate_0_15_CRm: + crm := (x >> 8) & (1<<4 - 1) + return Imm{crm, false} + + case arg_immediate_0_15_nzcv: + nzcv := x & (1<<4 - 1) + return Imm{nzcv, false} + + case arg_immediate_0_31_imm5: + imm5 := (x >> 16) & (1<<5 - 1) + return Imm{imm5, false} + + case arg_immediate_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, false} + + case arg_immediate_0_31_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms, true} + + case arg_immediate_0_63_b5_b40: + b5 := (x >> 31) & 1 + b40 := (x >> 19) & (1<<5 - 1) + return Imm{(b5 << 5) | b40, true} + + case arg_immediate_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, false} + + case arg_immediate_0_63_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms, true} + + case arg_immediate_0_65535_imm16: + imm16 := (x >> 5) & (1<<16 - 1) + return Imm{imm16, false} + + case arg_immediate_0_7_op1: + op1 := (x >> 16) & (1<<3 - 1) + return Imm{op1, true} + + case arg_immediate_0_7_op2: + op2 := (x >> 5) & (1<<3 - 1) + return Imm{op2, true} + + case arg_immediate_ASR_SBFM_32M_bitfield_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_ASR_SBFM_64M_bitfield_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_BFI_BFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_BFI_BFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case 
arg_immediate_BFI_BFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_BFI_BFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_BFXIL_BFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_BFXIL_BFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_BFXIL_BFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_BFXIL_BFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_bitmask_32_imms_immr: + return handle_bitmasks(x, 32) + + case arg_immediate_bitmask_64_N_imms_immr: + return handle_bitmasks(x, 64) + + case arg_immediate_LSL_UBFM_32M_bitfield_0_31_immr: + imms := (x >> 10) & (1<<6 - 1) + shift := 31 - imms + if shift > 31 { + return nil + } + return Imm{shift, true} + + case arg_immediate_LSL_UBFM_64M_bitfield_0_63_immr: + imms := (x >> 10) & (1<<6 - 1) + shift := 63 - imms + if shift > 63 { + return nil + } + return Imm{shift, true} + + case arg_immediate_LSR_UBFM_32M_bitfield_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_LSR_UBFM_64M_bitfield_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_optional_0_15_CRm: + crm := (x >> 8) & (1<<4 - 1) + return Imm_clrex(crm) + + case arg_immediate_optional_0_65535_imm16: + imm16 := (x >> 5) & (1<<16 - 1) + return Imm_dcps(imm16) + + case arg_immediate_OptLSL_amount_16_0_16: + imm16 := (x >> 5) & 
(1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if shift > 16 { + return nil + } + return ImmShift{uint16(imm16), uint8(shift)} + + case arg_immediate_OptLSL_amount_16_0_48: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + return ImmShift{uint16(imm16), uint8(shift)} + + case arg_immediate_SBFIZ_SBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_SBFIZ_SBFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case arg_immediate_SBFIZ_SBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_SBFIZ_SBFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_SBFX_SBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_SBFX_SBFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_SBFX_SBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_shift_32_implicit_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if shift > 16 { + return nil + } + result := uint32(imm16) << shift + return Imm{result, false} + + case arg_immediate_shift_32_implicit_inverse_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if 
shift > 16 { + return nil + } + result := uint32(imm16) << shift + return Imm{^result, false} + + case arg_immediate_shift_64_implicit_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + result := uint64(imm16) << shift + return Imm64{result, false} + + case arg_immediate_shift_64_implicit_inverse_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + result := uint64(imm16) << shift + return Imm64{^result, false} + + case arg_immediate_UBFIZ_UBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_UBFIZ_UBFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case arg_immediate_UBFIZ_UBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_UBFIZ_UBFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_UBFX_UBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_UBFX_UBFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_UBFX_UBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_UBFX_UBFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_Rt_31_1__W_0__X_1: + b5 := (x >> 31) & 1 + Rt := x & (1<<5 - 1) + if b5 == 0 { + return W0 + Reg(Rt) + } else { + return X0 + Reg(Rt) + } + + case arg_cond_AllowALNV_Normal: + cond := (x 
>> 12) & (1<<4 - 1) + return Cond{uint8(cond), false} + + case arg_conditional: + cond := x & (1<<4 - 1) + return Cond{uint8(cond), false} + + case arg_cond_NotAllowALNV_Invert: + cond := (x >> 12) & (1<<4 - 1) + if (cond >> 1) == 7 { + return nil + } + return Cond{uint8(cond), true} + + case arg_Cm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_c(CRm) + + case arg_Cn: + CRn := (x >> 12) & (1<<4 - 1) + return Imm_c(CRn) + + case arg_option_DMB_BO_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_option(CRm) + + case arg_option_DSB_BO_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_option(CRm) + + case arg_option_ISB_BI_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + if CRm == 15 { + return Imm_option(CRm) + } + return Imm{CRm, false} + + case arg_prfop_Rt: + Rt := x & (1<<5 - 1) + return Imm_prfop(Rt) + + case arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37: + op1 := (x >> 16) & (1<<3 - 1) + op2 := (x >> 5) & (1<<3 - 1) + if (op1 == 0) && (op2 == 5) { + return SPSel + } else if (op1 == 3) && (op2 == 6) { + return DAIFSet + } else if (op1 == 3) && (op2 == 7) { + return DAIFClr + } + return nil + + case arg_sysreg_o0_op1_CRn_CRm_op2: + op0 := (x >> 19) & (1<<2 - 1) + op1 := (x >> 16) & (1<<3 - 1) + CRn := (x >> 12) & (1<<4 - 1) + CRm := (x >> 8) & (1<<4 - 1) + op2 := (x >> 5) & (1<<3 - 1) + return Systemreg{uint8(op0), uint8(op1), uint8(CRn), uint8(CRm), uint8(op2)} + + case arg_sysop_AT_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_DC_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_TLBI_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_Bt: + return B0 + Reg(x&(1<<5-1)) + + case arg_Dt: + return D0 + Reg(x&(1<<5-1)) + + case arg_Dt2: + return D0 + Reg((x>>10)&(1<<5-1)) + + case arg_Ht: + return H0 + Reg(x&(1<<5-1)) + + case arg_immediate_0_63_immh_immb__UIntimmhimmb64_8: + immh := (x >> 19) & 
(1<<4 - 1) + if (immh & 8) == 0 { + return nil + } + immb := (x >> 16) & (1<<3 - 1) + return Imm{(immh << 3) + immb - 64, true} + + case arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{(immh << 3) + immb - 8, true} + } else if (immh >> 1) == 1 { + return Imm{(immh << 3) + immb - 16, true} + } else if (immh >> 2) == 1 { + return Imm{(immh << 3) + immb - 32, true} + } else { + return nil + } + + case arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: + fallthrough + + case arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{(immh << 3) + immb - 8, true} + } else if (immh >> 1) == 1 { + return Imm{(immh << 3) + immb - 16, true} + } else if (immh >> 2) == 1 { + return Imm{(immh << 3) + immb - 32, true} + } else if (immh >> 3) == 1 { + return Imm{(immh << 3) + immb - 64, true} + } else { + return nil + } + + case arg_immediate_0_width_size__8_0__16_1__32_2: + size := (x >> 22) & (1<<2 - 1) + switch size { + case 0: + return Imm{8, true} + case 1: + return Imm{16, true} + case 2: + return Imm{32, true} + default: + return nil + } + + case arg_immediate_1_64_immh_immb__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + if (immh & 8) == 0 { + return nil + } + immb := (x >> 16) & (1<<3 - 1) + return Imm{128 - ((immh << 3) + immb), true} + + case arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: + fallthrough + + case arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return 
Imm{16 - ((immh << 3) + immb), true} + } else if (immh >> 1) == 1 { + return Imm{32 - ((immh << 3) + immb), true} + } else if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{16 - ((immh << 3) + immb), true} + } else if (immh >> 1) == 1 { + return Imm{32 - ((immh << 3) + immb), true} + } else if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else if (immh >> 3) == 1 { + return Imm{128 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_8x8_a_b_c_d_e_f_g_h: + var imm uint64 + if x&(1<<5) != 0 { + imm = (1 << 8) - 1 + } else { + imm = 0 + } + if x&(1<<6) != 0 { + imm += ((1 << 8) - 1) << 8 + } + if x&(1<<7) != 0 { + imm += ((1 << 8) - 1) << 16 + } + if x&(1<<8) != 0 { + imm += ((1 << 8) - 1) << 24 + } + if x&(1<<9) != 0 { + imm += ((1 << 8) - 1) << 32 + } + if x&(1<<16) != 0 { + imm += ((1 << 8) - 1) << 40 + } + if x&(1<<17) != 0 { + imm += ((1 << 8) - 1) << 48 + } + if x&(1<<18) != 0 { + imm += ((1 << 8) - 1) << 56 + } + return Imm64{imm, false} + + case arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h: + pre := (x >> 5) & (1<<4 - 1) + exp := 1 - ((x >> 17) & 1) + exp = (exp << 2) + (((x >> 16) & 1) << 1) + ((x >> 9) & 1) + s := ((x >> 18) & 1) + return Imm_fp{uint8(s), int8(exp) - 3, uint8(pre)} + + case arg_immediate_exp_3_pre_4_imm8: + pre := (x >> 13) & (1<<4 - 1) + exp := 1 - ((x >> 19) & 1) + exp = (exp << 2) + ((x >> 17) & (1<<2 - 1)) + s := ((x >> 20) & 1) + return Imm_fp{uint8(s), int8(exp) - 3, uint8(pre)} + + case arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8: + fallthrough + + case 
arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__64UIntimmhimmb_4__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else if (immh >> 3) == 1 { + return Imm{128 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_fbits_min_1_max_32_sub_64_scale: + scale := (x >> 10) & (1<<6 - 1) + fbits := 64 - scale + if fbits > 32 { + return nil + } + return Imm{fbits, true} + + case arg_immediate_fbits_min_1_max_64_sub_64_scale: + scale := (x >> 10) & (1<<6 - 1) + fbits := 64 - scale + return Imm{fbits, true} + + case arg_immediate_floatzero: + return Imm{0, true} + + case arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10: + Q := (x >> 30) & 1 + imm4 := (x >> 11) & (1<<4 - 1) + if Q == 1 || (imm4>>3) == 0 { + return Imm{imm4, true} + } else { + return nil + } + + case arg_immediate_MSL__a_b_c_d_e_f_g_h_cmode__8_0__16_1: + var shift uint8 + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + if (x>>12)&1 == 0 { + shift = 8 + 128 + } else { + shift = 16 + 128 + } + return ImmShift{uint16(imm8), shift} + + case arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + cmode1 := (x >> 13) & 1 + shift := 8 * cmode1 + return ImmShift{uint16(imm8), uint8(shift)} + + case arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1__16_2__24_3: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + cmode1 := (x >> 13) & (1<<2 - 1) + shift := 8 * cmode1 + return ImmShift{uint16(imm8), uint8(shift)} + + case arg_immediate_OptLSLZero__a_b_c_d_e_f_g_h: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + return ImmShift{uint16(imm8), 0} + + case arg_immediate_zero: + return Imm{0, true} + + case arg_Qd: + return Q0 + Reg(x&(1<<5-1)) + + case arg_Qn: + return Q0 + 
Reg((x>>5)&(1<<5-1)) + + case arg_Qt: + return Q0 + Reg(x&(1<<5-1)) + + case arg_Qt2: + return Q0 + Reg((x>>10)&(1<<5-1)) + + case arg_Rn_16_5__W_1__W_2__W_4__X_8: + imm5 := (x >> 16) & (1<<5 - 1) + if ((imm5 & 1) == 1) || ((imm5 & 2) == 2) || ((imm5 & 4) == 4) { + return W0 + Reg((x>>5)&(1<<5-1)) + } else if (imm5 & 8) == 8 { + return X0 + Reg((x>>5)&(1<<5-1)) + } else { + return nil + } + + case arg_St: + return S0 + Reg(x&(1<<5-1)) + + case arg_St2: + return S0 + Reg((x>>10)&(1<<5-1)) + + case arg_Vd_16_5__B_1__H_2__S_4__D_8: + imm5 := (x >> 16) & (1<<5 - 1) + Rd := x & (1<<5 - 1) + if imm5&1 == 1 { + return B0 + Reg(Rd) + } else if imm5&2 == 2 { + return H0 + Reg(Rd) + } else if imm5&4 == 4 { + return S0 + Reg(Rd) + } else if imm5&8 == 8 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__B_1__H_2__S_4: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rd) + } else if immh>>1 == 1 { + return H0 + Reg(Rd) + } else if immh>>2 == 1 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__B_1__H_2__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rd) + } else if immh>>1 == 1 { + return H0 + Reg(Rd) + } else if immh>>2 == 1 { + return S0 + Reg(Rd) + } else if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh>>2 == 1 { + return S0 + Reg(Rd) + } else if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_1__S_0: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rd) + } else { + 
return D0 + Reg(Rd) + } + + case arg_Vd_22_1__S_1: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 1 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__B_0__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rd) + } else if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rd) + } else if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return D0 + Reg(Rd) + } + + case arg_Vd_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__H_0__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return H0 + Reg(Rd) + } else if size == 1 { + return S0 + Reg(Rd) + } else if size == 2 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 1 { + return S0 + Reg(Rd) + } else if size == 2 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_arrangement_16B: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + + case arg_Vd_arrangement_2D: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + + case arg_Vd_arrangement_4S: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + + case arg_Vd_arrangement_D_index__1: + Rd := x & (1<<5 - 1) + return RegisterWithArrangementAndIndex{V0 + Reg(Rd), ArrangementD, 1, 0} + + case 
arg_Vd_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1: + var a Arrangement + var index uint32 + Rd := x & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else if imm5&8 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rd), a, uint8(index), 0} + + case arg_Vd_arrangement_imm5_Q___8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + Q := (x >> 30) & 1 + if imm5&1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if imm5&2 == 2 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if imm5&4 == 4 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if (imm5&8 == 8) && (Q == 1) { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } else { + return nil + } + + case arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + } + return nil + + case 
arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } + return nil + + case arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + } + return nil + + case arg_Vd_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + if immh == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if immh>>1 == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if immh>>2 == 1 { + return 
RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_Q___2S_0__4S_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + + case arg_Vd_arrangement_Q___4H_0__8H_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + + case arg_Vd_arrangement_Q___8B_0__16B_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + + case arg_Vd_arrangement_Q_sz___2S_00__4S_10__2D_11: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size___4S_1__2D_2: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size___8H_0__1Q_3: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 3 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement1Q, 0} + } + return nil + + case arg_Vd_arrangement_size___8H_0__4S_1__2D_2: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if 
size == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___4H_00__8H_01__2S_10__4S_11__1D_20__2D_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement1D, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 
+ Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case 
arg_Vd_arrangement_sz___4S_0__2D_1: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + + case arg_Vd_arrangement_sz_Q___2S_00__4S_01: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___2S_10__4S_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else /* sz == 1 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + + case arg_Vm_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rm := (x >> 16) & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rm) + } else { + return D0 + Reg(Rm) + } + + case 
arg_Vm_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rm) + } else if size == 1 { + return H0 + Reg(Rm) + } else if size == 2 { + return S0 + Reg(Rm) + } else { + return D0 + Reg(Rm) + } + + case arg_Vm_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rm) + } else { + return nil + } + + case arg_Vm_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 1 { + return H0 + Reg(Rm) + } else if size == 2 { + return S0 + Reg(Rm) + } else { + return nil + } + + case arg_Vm_arrangement_4S: + Rm := (x >> 16) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + + case arg_Vm_arrangement_Q___8B_0__16B_1: + Rm := (x >> 16) & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } + + case arg_Vm_arrangement_size___8H_0__4S_1__2D_2: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_size___H_1__S_2_index__size_L_H_M__HLM_1__HL_2_1: + var a Arrangement + var index uint32 + var vm uint32 + Rm := (x >> 16) & (1<<4 - 1) + size := (x >> 22) & 3 + H := (x >> 11) & 1 + L := (x >> 21) & 1 + M := (x >> 20) & 1 + if size == 1 { + a = ArrangementH + index = (H << 2) | (L << 1) | M + vm = Rm + } else if size == 2 { + a = ArrangementS + index = (H << 1) | L + vm = (M << 4) | Rm + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(vm), a, uint8(index), 0} + + case arg_Vm_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rm := (x 
>> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement1D, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 
0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rm := (x >> 16) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_sz___S_0__D_1_index__sz_L_H__HL_00__H_10_1: + var a Arrangement + var index uint32 + Rm := (x >> 16) & (1<<5 - 1) + sz := (x >> 22) & 1 + H := (x >> 11) & 1 + L := (x >> 21) & 1 + if sz == 0 { + a = ArrangementS + index = (H << 1) | L + } else if sz == 1 && L == 0 { + a = ArrangementD + index = H + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rm), a, uint8(index), 0} + + case arg_Vn_19_4__B_1__H_2__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rn) + } else 
if immh>>1 == 1 { + return H0 + Reg(Rn) + } else if immh>>2 == 1 { + return S0 + Reg(Rn) + } else if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__H_1__S_2__D_4: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh == 1 { + return H0 + Reg(Rn) + } else if immh>>1 == 1 { + return S0 + Reg(Rn) + } else if immh>>2 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh>>2 == 1 { + return S0 + Reg(Rn) + } else if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_1_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 1} + + case arg_Vn_22_1__D_1: + sz := (x >> 22) & 1 + Rn := (x >> 5) & (1<<5 - 1) + if sz == 1 { + return D0 + Reg(Rn) + } + return nil + + case arg_Vn_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rn := (x >> 5) & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rn) + } else { + return D0 + Reg(Rn) + } + + case arg_Vn_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rn) + } else if size == 1 { + return H0 + Reg(Rn) + } else if size == 2 { + return S0 + Reg(Rn) + } else { + return D0 + Reg(Rn) + } + + case arg_Vn_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_22_2__H_0__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 0 { + return H0 + Reg(Rn) + } else if size == 1 { + return S0 + Reg(Rn) + } else if size == 2 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & 
(1<<5 - 1) + if size == 1 { + return H0 + Reg(Rn) + } else if size == 2 { + return S0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_2_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 2} + + case arg_Vn_3_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 3} + + case arg_Vn_4_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 4} + + case arg_Vn_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + + case arg_Vn_arrangement_4S: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + + case arg_Vn_arrangement_D_index__1: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementD, 1, 0} + + case arg_Vn_arrangement_D_index__imm5_1: + Rn := (x >> 5) & (1<<5 - 1) + index := (x >> 20) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementD, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2_index__imm5__imm5lt41gt_1__imm5lt42gt_2_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5_imm4__imm4lt30gt_1__imm4lt31gt_2__imm4lt32gt_4__imm4lt3gt_8_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + imm4 := (x >> 11) & (1<<4 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm4 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm4 >> 1 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm4 >> 2 + } else if imm5&8 == 8 { + a = 
ArrangementD + index = imm4 >> 3 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else if imm5&8 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___D_8_index__imm5_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&15 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} 
+ } + } + return nil + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } + return nil + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + } + return nil + + case arg_Vn_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + if immh == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if immh>>1 == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + 
} else if immh>>2 == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_Q___8B_0__16B_1: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + + case arg_Vn_arrangement_Q_sz___2S_00__4S_10__2D_11: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_Q_sz___4S_10: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_S_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1: + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + index = imm5 >> 3 + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementS, uint8(index), 0} + + case arg_Vn_arrangement_size___2D_3: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 3 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size___8H_0__4S_1__2D_2: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 
30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement1D, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 
1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), 
Arrangement8H, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_sz___2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_sz___2S_0__2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + + case arg_Vn_arrangement_sz___4S_0__2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + + case arg_Vn_arrangement_sz_Q___2S_00__4S_01: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), 
Arrangement2S, 0} + } else /* sz == 1 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + + case arg_Vt_1_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 1} + + case arg_Vt_1_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 1} + + case arg_Vt_1_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 1} + + case arg_Vt_1_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 1} + + case arg_Vt_1_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 1} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 1} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 1} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 1} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 1} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 1} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 1} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), 
Arrangement2D, 1} + } + + case arg_Vt_2_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 2} + + case arg_Vt_2_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 2} + + case arg_Vt_2_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 2} + + case arg_Vt_2_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 2} + + case arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 2} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 2} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 2} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 2} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 2} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 2} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 2} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 2} + } + + case arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: 
+ Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 2} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 2} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 2} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 2} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 2} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 2} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 2} + } + return nil + + case arg_Vt_3_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 3} + + case arg_Vt_3_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 3} + + case arg_Vt_3_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 3} + + case arg_Vt_3_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 3} + + case arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 3} + } else if size == 0 && Q == 1 { + 
return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 3} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 3} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 3} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 3} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 3} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 3} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 3} + } + + case arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 3} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 3} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 3} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 3} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 3} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 3} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 3} + } + return nil + + case arg_Vt_4_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 4} + + case arg_Vt_4_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 4} + + case 
arg_Vt_4_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 4} + + case arg_Vt_4_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 4} + + case arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 4} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 4} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 4} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 4} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 4} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 4} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 4} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 4} + } + + case arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 4} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 4} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 4} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 4} + } else if size 
== 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 4} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 4} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 4} + } + return nil + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__4_1: + return handle_MemExtend(x, 4, false) + + case arg_Xns_mem_offset: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrOffset, 0} + + case arg_Xns_mem_optional_imm12_16_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 4)} + + case arg_Xns_mem_optional_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 4)) << 21) >> 21} + + case arg_Xns_mem_post_fixedimm_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 1} + + case arg_Xns_mem_post_fixedimm_12: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 12} + + case arg_Xns_mem_post_fixedimm_16: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 16} + + case arg_Xns_mem_post_fixedimm_2: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 2} + + case arg_Xns_mem_post_fixedimm_24: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 24} + + case arg_Xns_mem_post_fixedimm_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 3} + + case arg_Xns_mem_post_fixedimm_32: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 32} + + case arg_Xns_mem_post_fixedimm_4: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 4} + + case arg_Xns_mem_post_fixedimm_6: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, 
AddrPostIndex, 6} + + case arg_Xns_mem_post_fixedimm_8: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 8} + + case arg_Xns_mem_post_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 4)) << 21) >> 21} + + case arg_Xns_mem_post_Q__16_0__32_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 16)} + + case arg_Xns_mem_post_Q__24_0__48_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 24)} + + case arg_Xns_mem_post_Q__32_0__64_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 32)} + + case arg_Xns_mem_post_Q__8_0__16_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 8)} + + case arg_Xns_mem_post_size__1_0__2_1__4_2__8_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(1 << size)} + + case arg_Xns_mem_post_size__2_0__4_1__8_2__16_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(2 << size)} + + case arg_Xns_mem_post_size__3_0__6_1__12_2__24_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(3 << size)} + + case arg_Xns_mem_post_size__4_0__8_1__16_2__32_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(4 << size)} + + case arg_Xns_mem_post_Xm: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Rm := (x >> 16) & (1<<5 - 1) + return MemImmediate{Rn, AddrPostReg, int32(Rm)} + + case arg_Xns_mem_wb_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, 
AddrPreIndex, ((int32(imm7 << 4)) << 21) >> 21} + } +} + +func handle_ExtendedRegister(x uint32, has_width bool) Arg { + s := (x >> 29) & 1 + rm := (x >> 16) & (1<<5 - 1) + option := (x >> 13) & (1<<3 - 1) + imm3 := (x >> 10) & (1<<3 - 1) + rn := (x >> 5) & (1<<5 - 1) + rd := x & (1<<5 - 1) + is_32bit := !has_width + var rea RegExtshiftAmount + if has_width { + if option&0x3 != 0x3 { + rea.reg = W0 + Reg(rm) + } else { + rea.reg = X0 + Reg(rm) + } + } else { + rea.reg = W0 + Reg(rm) + } + switch option { + case 0: + rea.extShift = uxtb + case 1: + rea.extShift = uxth + case 2: + if is_32bit && (rn == 31 || (s == 0 && rd == 31)) { + if imm3 != 0 { + rea.extShift = lsl + } else { + rea.extShift = ExtShift(0) + } + } else { + rea.extShift = uxtw + } + case 3: + if !is_32bit && (rn == 31 || (s == 0 && rd == 31)) { + if imm3 != 0 { + rea.extShift = lsl + } else { + rea.extShift = ExtShift(0) + } + } else { + rea.extShift = uxtx + } + case 4: + rea.extShift = sxtb + case 5: + rea.extShift = sxth + case 6: + rea.extShift = sxtw + case 7: + rea.extShift = sxtx + } + rea.show_zero = false + rea.amount = uint8(imm3) + return rea +} + +func handle_ImmediateShiftedRegister(x uint32, max uint8, is_w, has_ror bool) Arg { + var rsa RegExtshiftAmount + if is_w { + rsa.reg = W0 + Reg((x>>16)&(1<<5-1)) + } else { + rsa.reg = X0 + Reg((x>>16)&(1<<5-1)) + } + switch (x >> 22) & 0x3 { + case 0: + rsa.extShift = lsl + case 1: + rsa.extShift = lsr + case 2: + rsa.extShift = asr + case 3: + if has_ror { + rsa.extShift = ror + } else { + return nil + } + } + rsa.show_zero = true + rsa.amount = uint8((x >> 10) & (1<<6 - 1)) + if rsa.amount == 0 && rsa.extShift == lsl { + rsa.extShift = ExtShift(0) + } else if rsa.amount > max { + return nil + } + return rsa +} + +func handle_MemExtend(x uint32, mult uint8, absent bool) Arg { + var extend ExtShift + var Rm Reg + option := (x >> 13) & (1<<3 - 1) + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + if (option & 1) != 0 { + Rm = Reg(X0) + 
Reg(x>>16&(1<<5-1)) + } else { + Rm = Reg(W0) + Reg(x>>16&(1<<5-1)) + } + switch option { + default: + return nil + case 2: + extend = uxtw + case 3: + extend = lsl + case 6: + extend = sxtw + case 7: + extend = sxtx + } + amount := (uint8((x >> 12) & 1)) * mult + return MemExtend{Rn, Rm, extend, amount, absent} +} + +func handle_bitmasks(x uint32, datasize uint8) Arg { + var length, levels, esize, i uint8 + var welem, wmask uint64 + n := (x >> 22) & 1 + imms := uint8((x >> 10) & (1<<6 - 1)) + immr := uint8((x >> 16) & (1<<6 - 1)) + if n != 0 { + length = 6 + } else if (imms & 32) == 0 { + length = 5 + } else if (imms & 16) == 0 { + length = 4 + } else if (imms & 8) == 0 { + length = 3 + } else if (imms & 4) == 0 { + length = 2 + } else if (imms & 2) == 0 { + length = 1 + } else { + return nil + } + levels = 1<<length - 1 + s := imms & levels + r := immr & levels + esize = 1 << length + if esize > datasize { + return nil + } + welem = 1<<(s+1) - 1 + ror := (welem >> r) | (welem << (esize - r)) + ror &= ((1 << esize) - 1) + wmask = 0 + for i = 0; i < datasize; i += esize { + wmask = (wmask << esize) | ror + } + return Imm64{wmask, false} +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go b/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go new file mode 100755 index 00000000..9c7d2b62 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package arm64asm + +import ( + "encoding/hex" + "io/ioutil" + "path/filepath" + "strings" + "testing" +) + +func testDecode(t *testing.T, syntax string) { + input := filepath.Join("testdata", syntax+"cases.txt") + data, err := ioutil.ReadFile(input) + if err != nil { + t.Fatal(err) + } + all := string(data) + for strings.Contains(all, "\t\t") { + all = strings.Replace(all, "\t\t", "\t", -1) + } + for _, line := range strings.Split(all, "\n") { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + f := strings.SplitN(line, "\t", 2) + i := strings.Index(f[0], "|") + if i < 0 { + t.Errorf("parsing %q: missing | separator", f[0]) + continue + } + if i%2 != 0 { + t.Errorf("parsing %q: misaligned | separator", f[0]) + } + code, err := hex.DecodeString(f[0][:i] + f[0][i+1:]) + if err != nil { + t.Errorf("parsing %q: %v", f[0], err) + continue + } + asm := f[1] + inst, decodeErr := Decode(code) + if decodeErr != nil && decodeErr != errUnknown { + // Some rarely used system instructions are not supported + // Following logicals will filter such unknown instructions + + t.Errorf("parsing %x: %s", code, decodeErr) + continue + } + var out string + switch syntax { + case "gnu": + out = GNUSyntax(inst) + case "plan9": + out = GoSyntax(inst, 0, nil, nil) + default: + t.Errorf("unknown syntax %q", syntax) + continue + } + // TODO: system instruction. + var Todo = strings.Fields(` + sys + dc + at + tlbi + ic + hvc + smc + `) + if strings.Replace(out, " ", "", -1) != strings.Replace(asm, " ", "", -1) && !hasPrefix(asm, Todo...) { + // Exclude MSR since GNU objdump result is incorrect. eg. 
0xd504431f msr s0_4_c4_c3_0, xzr + if !strings.HasSuffix(asm, " nv") && !strings.HasPrefix(asm, "msr") { + t.Errorf("Decode(%s) [%s] = %s, want %s", strings.Trim(f[0], "|"), syntax, out, asm) + } + } + } +} + +func TestDecodeGNUSyntax(t *testing.T) { + testDecode(t, "gnu") +} + +func TestDecodeGoSyntax(t *testing.T) { + testDecode(t, "plan9") +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go b/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go new file mode 100755 index 00000000..f432203e --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go @@ -0,0 +1,604 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for testing against external disassembler program. +// Copied and simplified from ../../arm/armasm/ext_test.go. + +package arm64asm + +import ( + "bufio" + "bytes" + "encoding/hex" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" +) + +var ( + dumpTest = flag.Bool("dump", false, "dump all encodings") + mismatch = flag.Bool("mismatch", false, "log allowed mismatches") + longTest = flag.Bool("long", false, "long test") + keep = flag.Bool("keep", false, "keep object files around") + debug = false +) + +// An ExtInst represents a single decoded instruction parsed +// from an external disassembler's output. +type ExtInst struct { + addr uint64 + enc [4]byte + nenc int + text string +} + +func (r ExtInst) String() string { + return fmt.Sprintf("%#x: % x: %s", r.addr, r.enc, r.text) +} + +// An ExtDis is a connection between an external disassembler and a test. 
+type ExtDis struct { + Arch Mode + Dec chan ExtInst + File *os.File + Size int + KeepFile bool + Cmd *exec.Cmd +} + +// InstJson describes instruction fields value got from ARMv8-A Reference Manual +type InstJson struct { + Name string + Bits string + Arch string + Syntax string + Code string + Alias string + Enc uint32 +} + +// A Mode is an instruction execution mode. +type Mode int + +const ( + _ Mode = iota + ModeARM64 +) + +// Run runs the given command - the external disassembler - and returns +// a buffered reader of its standard output. +func (ext *ExtDis) Run(cmd ...string) (*bufio.Reader, error) { + if *keep { + log.Printf("%s\n", strings.Join(cmd, " ")) + } + ext.Cmd = exec.Command(cmd[0], cmd[1:]...) + out, err := ext.Cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("stdoutpipe: %v", err) + } + if err := ext.Cmd.Start(); err != nil { + return nil, fmt.Errorf("exec: %v", err) + } + + b := bufio.NewReaderSize(out, 1<<20) + return b, nil +} + +// Wait waits for the command started with Run to exit. +func (ext *ExtDis) Wait() error { + return ext.Cmd.Wait() +} + +// testExtDis tests a set of byte sequences against an external disassembler. +// The disassembler is expected to produce the given syntax and run +// in the given architecture mode (16, 32, or 64-bit). +// The extdis function must start the external disassembler +// and then parse its output, sending the parsed instructions on ext.Dec. +// The generate function calls its argument f once for each byte sequence +// to be tested. The generate function itself will be called twice, and it must +// make the same sequence of calls to f each time. +// When a disassembly does not match the internal decoding, +// allowedMismatch determines whether this mismatch should be +// allowed, or else considered an error. 
+func testExtDis( + t *testing.T, + syntax string, + arch Mode, + extdis func(ext *ExtDis) error, + generate func(f func([]byte)), + allowedMismatch func(text string, inst *Inst, dec ExtInst) bool, +) { + start := time.Now() + ext := &ExtDis{ + Dec: make(chan ExtInst), + Arch: arch, + } + errc := make(chan error) + + // First pass: write instructions to input file for external disassembler. + file, f, size, err := writeInst(generate) + if err != nil { + t.Fatal(err) + } + ext.Size = size + ext.File = f + defer func() { + f.Close() + if !*keep { + os.Remove(file) + } + }() + + // Second pass: compare disassembly against our decodings. + var ( + totalTests = 0 + totalSkips = 0 + totalErrors = 0 + + errors = make([]string, 0, 100) // Sampled errors, at most cap + ) + go func() { + errc <- extdis(ext) + }() + + generate(func(enc []byte) { + dec, ok := <-ext.Dec + if !ok { + t.Errorf("decoding stream ended early") + return + } + inst, text := disasm(syntax, pad(enc)) + + totalTests++ + if *dumpTest { + fmt.Printf("%x -> %s [%d]\n", enc[:len(enc)], dec.text, dec.nenc) + } + if text != dec.text && !strings.Contains(dec.text, "unknown") && syntax == "gnu" { + suffix := "" + if allowedMismatch(text, &inst, dec) { + totalSkips++ + if !*mismatch { + return + } + suffix += " (allowed mismatch)" + } + totalErrors++ + cmp := fmt.Sprintf("decode(%x) = %q, %d, want %q, %d%s\n", enc, text, len(enc), dec.text, dec.nenc, suffix) + + if len(errors) >= cap(errors) { + j := rand.Intn(totalErrors) + if j >= cap(errors) { + return + } + errors = append(errors[:j], errors[j+1:]...) 
+ } + errors = append(errors, cmp) + } + }) + + if *mismatch { + totalErrors -= totalSkips + } + + for _, b := range errors { + t.Log(b) + } + + if totalErrors > 0 { + t.Fail() + } + t.Logf("%d test cases, %d expected mismatches, %d failures; %.0f cases/second", totalTests, totalSkips, totalErrors, float64(totalTests)/time.Since(start).Seconds()) + t.Logf("decoder coverage: %.1f%%;\n", decodeCoverage()) + if err := <-errc; err != nil { + t.Fatalf("external disassembler: %v", err) + } + +} + +// Start address of text. +const start = 0x8000 + +// writeInst writes the generated byte sequences to a new file +// starting at offset start. That file is intended to be the input to +// the external disassembler. +func writeInst(generate func(func([]byte))) (file string, f *os.File, size int, err error) { + f, err = ioutil.TempFile("", "arm64asm") + if err != nil { + return + } + + file = f.Name() + + f.Seek(start, io.SeekStart) + w := bufio.NewWriter(f) + defer w.Flush() + size = 0 + generate(func(x []byte) { + if debug { + fmt.Printf("%#x: %x%x\n", start+size, x, zeros[len(x):]) + } + w.Write(x) + w.Write(zeros[len(x):]) + size += len(zeros) + }) + return file, f, size, nil +} + +var zeros = []byte{0, 0, 0, 0} + +// pad pads the code sequence with pops. +func pad(enc []byte) []byte { + if len(enc) < 4 { + enc = append(enc[:len(enc):len(enc)], zeros[:4-len(enc)]...) + } + return enc +} + +// disasm returns the decoded instruction and text +// for the given source bytes, using the given syntax and mode. +func disasm(syntax string, src []byte) (inst Inst, text string) { + var err error + inst, err = Decode(src) + if err != nil { + text = "error: " + err.Error() + return + } + text = inst.String() + switch syntax { + case "gnu": + text = GNUSyntax(inst) + case "plan9": // [sic] + text = GoSyntax(inst, 0, nil, nil) + default: + text = "error: unknown syntax " + syntax + } + return +} + +// decodecoverage returns a floating point number denoting the +// decoder coverage. 
+func decodeCoverage() float64 { + n := 0 + for _, t := range decoderCover { + if t { + n++ + } + } + return 100 * float64(1+n) / float64(1+len(decoderCover)) +} + +// Helpers for writing disassembler output parsers. + +// hasPrefix reports whether any of the space-separated words in the text s +// begins with any of the given prefixes. +func hasPrefix(s string, prefixes ...string) bool { + for _, prefix := range prefixes { + for cur_s := s; cur_s != ""; { + if strings.HasPrefix(cur_s, prefix) { + return true + } + i := strings.Index(cur_s, " ") + if i < 0 { + break + } + cur_s = cur_s[i+1:] + } + } + return false +} + +// isHex reports whether b is a hexadecimal character (0-9a-fA-F). +func isHex(b byte) bool { + return ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F') +} + +// parseHex parses the hexadecimal byte dump in hex, +// appending the parsed bytes to raw and returning the updated slice. +// The returned bool reports whether any invalid hex was found. +// Spaces and tabs between bytes are okay but any other non-hex is not. +func parseHex(hex []byte, raw []byte) ([]byte, bool) { + hex = bytes.TrimSpace(hex) + for j := 0; j < len(hex); { + for hex[j] == ' ' || hex[j] == '\t' { + j++ + } + if j >= len(hex) { + break + } + if j+2 > len(hex) || !isHex(hex[j]) || !isHex(hex[j+1]) { + return nil, false + } + raw = append(raw, unhex(hex[j])<<4|unhex(hex[j+1])) + j += 2 + } + return raw, true +} + +func unhex(b byte) byte { + if '0' <= b && b <= '9' { + return b - '0' + } else if 'A' <= b && b <= 'F' { + return b - 'A' + 10 + } else if 'a' <= b && b <= 'f' { + return b - 'a' + 10 + } + return 0 +} + +// index is like bytes.Index(s, []byte(t)) but avoids the allocation. 
+func index(s []byte, t string) int { + i := 0 + for { + j := bytes.IndexByte(s[i:], t[0]) + if j < 0 { + return -1 + } + i = i + j + if i+len(t) > len(s) { + return -1 + } + for k := 1; k < len(t); k++ { + if s[i+k] != t[k] { + goto nomatch + } + } + return i + nomatch: + i++ + } +} + +// fixSpace rewrites runs of spaces, tabs, and newline characters into single spaces in s. +// If s must be rewritten, it is rewritten in place. +func fixSpace(s []byte) []byte { + s = bytes.TrimSpace(s) + for i := 0; i < len(s); i++ { + if s[i] == '\t' || s[i] == '\n' || i > 0 && s[i] == ' ' && s[i-1] == ' ' { + goto Fix + } + } + return s + +Fix: + b := s + w := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if c == '\t' || c == '\n' { + c = ' ' + } + if c == ' ' && w > 0 && b[w-1] == ' ' { + continue + } + b[w] = c + w++ + } + if w > 0 && b[w-1] == ' ' { + w-- + } + return b[:w] +} + +// Fllowing regular expressions matches instructions using relative addressing mode. +// pcrel matches B instructions and BL instructions. +// pcrelr matches instrucions which consisted of register arguments and label arguments. +// pcrelim matches instructions which consisted of register arguments, immediate +// arguments and lable arguments. +// pcrelrzr and prcelimzr matches instructions when register arguments is zero register. +// pcrelprfm matches PRFM instructions when arguments consisted of register and lable. +// pcrelprfmim matches PRFM instructions when arguments consisted of immediate and lable. +var ( + pcrel = regexp.MustCompile(`^((?:.* )?(?:b|bl)x?(?:\.)?(?:eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|nv)?) 
0x([0-9a-f]+)$`) + pcrelr = regexp.MustCompile(`^((?:.*)?(?:ldr|adrp|adr|cbnz|cbz|ldrsw) (?:x|w|s|d|q)(?:[0-9]+,)) 0x([0-9a-f]+)$`) + pcrelrzr = regexp.MustCompile(`^((?:.*)?(?:ldr|adrp|adr|cbnz|cbz|ldrsw) (?:x|w)zr,) 0x([0-9a-f]+)$`) + pcrelim = regexp.MustCompile(`^((?:.*)?(?:tbnz|tbz) (?:x|w)(?:[0-9]+,) (?:#[0-9a-f]+,)) 0x([0-9a-f]+)$`) + pcrelimzr = regexp.MustCompile(`^((?:.*)?(?:tbnz|tbz) (?:x|w)zr, (?:#[0-9a-f]+,)) 0x([0-9a-f]+)$`) + pcrelprfm = regexp.MustCompile(`^((?:.*)?(?:prfm) (?:[0-9a-z]+,)) 0x([0-9a-f]+)$`) + pcrelprfmim = regexp.MustCompile(`^((?:.*)?(?:prfm) (?:#0x[0-9a-f]+,)) 0x([0-9a-f]+)$`) +) + +// Round is the multiple of the number of instructions that read from Json file. +// Round used as seed value for pseudo-random number generator provides the same sequence +// in the same round run for the external disassembler and decoder. +var Round int + +// condmark is used to mark conditional instructions when need to generate and test +// conditional instructions. +var condmark bool = false + +// Generate instruction binary according to Json file +// Encode variable field of instruction with random value +func doFuzzy(inst *InstJson, Ninst int) { + var testdata uint32 + var NonDigRE = regexp.MustCompile(`[\D]`) + rand.Seed(int64(Round + Ninst)) + off := 0 + DigBit := "" + if condmark == true && !strings.Contains(inst.Bits, "cond") { + inst.Enc = 0xffffffff + } else { + for _, f := range strings.Split(inst.Bits, "|") { + if i := strings.Index(f, ":"); i >= 0 { + // consider f contains "01:2" and "Rm:5" + DigBit = f[:i] + m := NonDigRE.FindStringSubmatch(DigBit) + if m == nil { + DigBit = strings.TrimSpace(DigBit) + s := strings.Split(DigBit, "") + for i := 0; i < len(s); i++ { + switch s[i] { + case "1", "(1)": + testdata |= 1 << uint(31-off) + } + off++ + } + } else { + // DigBit is "Rn" or "imm3" + n, _ := strconv.Atoi(f[i+1:]) + if DigBit == "cond" && condmark == true { + r := uint8(Round) + for i := n - 1; i >= 0; i-- { + switch (r >> uint(i)) 
& 1 { + case 1: + testdata |= 1 << uint(31-off) + } + off++ + } + } else { + for i := 0; i < n; i++ { + r := rand.Intn(2) + switch r { + case 1: + testdata |= 1 << uint(31-off) + } + off++ + } + } + } + continue + } + for _, bit := range strings.Fields(f) { + switch bit { + case "0", "(0)": + off++ + continue + case "1", "(1)": + testdata |= 1 << uint(31-off) + default: + r := rand.Intn(2) + switch r { + case 1: + testdata |= 1 << uint(31-off) + } + } + off++ + } + } + if off != 32 { + log.Printf("incorrect bit count for %s %s: have %d", inst.Name, inst.Bits, off) + } + inst.Enc = testdata + } +} + +// Generators. +// +// The test cases are described as functions that invoke a callback repeatedly, +// with a new input sequence each time. These helpers make writing those +// a little easier. + +// JSONCases generates ARM64 instructions according to inst.json. +func JSONCases(t *testing.T) func(func([]byte)) { + return func(try func([]byte)) { + data, err := ioutil.ReadFile("inst.json") + if err != nil { + t.Fatal(err) + } + var insts []InstJson + var instsN []InstJson + // Change N value to get more cases only when condmark=false. + N := 100 + if condmark == true { + N = 16 + } + if err := json.Unmarshal(data, &insts); err != nil { + t.Fatal(err) + } + // Append instructions to get more test cases. + for i := 0; i < N; { + for _, inst := range insts { + instsN = append(instsN, inst) + } + i++ + } + Round = 0 + for i := range instsN { + if i%len(insts) == 0 { + Round++ + } + doFuzzy(&instsN[i], i) + } + for _, inst := range instsN { + if condmark == true && inst.Enc == 0xffffffff { + continue + } + enc := inst.Enc + try([]byte{byte(enc), byte(enc >> 8), byte(enc >> 16), byte(enc >> 24)}) + } + } +} + +// condCases generates conditional instructions. 
+func condCases(t *testing.T) func(func([]byte)) { + return func(try func([]byte)) { + condmark = true + JSONCases(t)(func(enc []byte) { + try(enc) + }) + } +} + +// hexCases generates the cases written in hexadecimal in the encoded string. +// Spaces in 'encoded' separate entire test cases, not individual bytes. +func hexCases(t *testing.T, encoded string) func(func([]byte)) { + return func(try func([]byte)) { + for _, x := range strings.Fields(encoded) { + src, err := hex.DecodeString(x) + if err != nil { + t.Errorf("parsing %q: %v", x, err) + } + try(src) + } + } +} + +// testdataCases generates the test cases recorded in testdata/cases.txt. +// It only uses the inputs; it ignores the answers recorded in that file. +func testdataCases(t *testing.T, syntax string) func(func([]byte)) { + var codes [][]byte + input := filepath.Join("testdata", syntax+"cases.txt") + data, err := ioutil.ReadFile(input) + if err != nil { + t.Fatal(err) + } + for _, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + f := strings.Fields(line)[0] + i := strings.Index(f, "|") + if i < 0 { + t.Errorf("parsing %q: missing | separator", f) + continue + } + if i%2 != 0 { + t.Errorf("parsing %q: misaligned | separator", f) + } + code, err := hex.DecodeString(f[:i] + f[i+1:]) + if err != nil { + t.Errorf("parsing %q: %v", f, err) + continue + } + codes = append(codes, code) + } + + return func(try func([]byte)) { + for _, code := range codes { + try(code) + } + } +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go b/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go new file mode 100755 index 00000000..d1be0461 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm64asm + +import ( + "strings" +) + +// GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils. +// This form typically matches the syntax defined in the ARM Reference Manual. +func GNUSyntax(inst Inst) string { + switch inst.Op { + case RET: + if r, ok := inst.Args[0].(Reg); ok && r == X30 { + return "ret" + } + case B: + if _, ok := inst.Args[0].(Cond); ok { + return strings.ToLower("b." + inst.Args[0].String() + " " + inst.Args[1].String()) + } + case SYSL: + result := strings.ToLower(inst.String()) + return strings.Replace(result, "c", "C", -1) + case DCPS1, DCPS2, DCPS3, CLREX: + return strings.ToLower(strings.TrimSpace(inst.String())) + case ISB: + if strings.Contains(inst.String(), "SY") { + result := strings.TrimSuffix(inst.String(), " SY") + return strings.ToLower(result) + } + } + return strings.ToLower(inst.String()) +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/inst.go b/vendor/golang.org/x/arch/arm64/arm64asm/inst.go new file mode 100755 index 00000000..cf690e2c --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/inst.go @@ -0,0 +1,968 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +import ( + "fmt" + "strings" +) + +// An Op is an ARM64 opcode. +type Op uint16 + +// NOTE: The actual Op values are defined in tables.go. +// They are chosen to simplify instruction decoding and +// are not a dense packing from 0 to N, although the +// density is high, probably at least 90%. + +func (op Op) String() string { + if op >= Op(len(opstr)) || opstr[op] == "" { + return fmt.Sprintf("Op(%d)", int(op)) + } + return opstr[op] +} + +// An Inst is a single instruction. +type Inst struct { + Op Op // Opcode mnemonic + Enc uint32 // Raw encoding bits. + Args Args // Instruction arguments, in ARM manual order. 
+} + +func (i Inst) String() string { + var args []string + for _, arg := range i.Args { + if arg == nil { + break + } + args = append(args, arg.String()) + } + return i.Op.String() + " " + strings.Join(args, ", ") +} + +// An Args holds the instruction arguments. +// If an instruction has fewer than 5 arguments, +// the final elements in the array are nil. +type Args [5]Arg + +// An Arg is a single instruction argument, one of these types: +// Reg, RegSP, ImmShift, RegExtshiftAmount, PCRel, MemImmediate, +// MemExtend, Imm, Imm64, Imm_hint, Imm_clrex, Imm_dcps, Cond, +// Imm_c, Imm_option, Imm_prfop, Pstatefield, Systemreg, Imm_fp +// RegisterWithArrangement, RegisterWithArrangementAndIndex. +type Arg interface { + isArg() + String() string +} + +// A Reg is a single register. +// The zero value denotes W0, not the absence of a register. +type Reg uint16 + +const ( + W0 Reg = iota + W1 + W2 + W3 + W4 + W5 + W6 + W7 + W8 + W9 + W10 + W11 + W12 + W13 + W14 + W15 + W16 + W17 + W18 + W19 + W20 + W21 + W22 + W23 + W24 + W25 + W26 + W27 + W28 + W29 + W30 + WZR + + X0 + X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X17 + X18 + X19 + X20 + X21 + X22 + X23 + X24 + X25 + X26 + X27 + X28 + X29 + X30 + XZR + + B0 + B1 + B2 + B3 + B4 + B5 + B6 + B7 + B8 + B9 + B10 + B11 + B12 + B13 + B14 + B15 + B16 + B17 + B18 + B19 + B20 + B21 + B22 + B23 + B24 + B25 + B26 + B27 + B28 + B29 + B30 + B31 + + H0 + H1 + H2 + H3 + H4 + H5 + H6 + H7 + H8 + H9 + H10 + H11 + H12 + H13 + H14 + H15 + H16 + H17 + H18 + H19 + H20 + H21 + H22 + H23 + H24 + H25 + H26 + H27 + H28 + H29 + H30 + H31 + + S0 + S1 + S2 + S3 + S4 + S5 + S6 + S7 + S8 + S9 + S10 + S11 + S12 + S13 + S14 + S15 + S16 + S17 + S18 + S19 + S20 + S21 + S22 + S23 + S24 + S25 + S26 + S27 + S28 + S29 + S30 + S31 + + D0 + D1 + D2 + D3 + D4 + D5 + D6 + D7 + D8 + D9 + D10 + D11 + D12 + D13 + D14 + D15 + D16 + D17 + D18 + D19 + D20 + D21 + D22 + D23 + D24 + D25 + D26 + D27 + D28 + D29 + D30 + D31 + + Q0 
+ Q1 + Q2 + Q3 + Q4 + Q5 + Q6 + Q7 + Q8 + Q9 + Q10 + Q11 + Q12 + Q13 + Q14 + Q15 + Q16 + Q17 + Q18 + Q19 + Q20 + Q21 + Q22 + Q23 + Q24 + Q25 + Q26 + Q27 + Q28 + Q29 + Q30 + Q31 + + V0 + V1 + V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 + V10 + V11 + V12 + V13 + V14 + V15 + V16 + V17 + V18 + V19 + V20 + V21 + V22 + V23 + V24 + V25 + V26 + V27 + V28 + V29 + V30 + V31 + + WSP = WZR // These are different registers with the same encoding. + SP = XZR // These are different registers with the same encoding. +) + +func (Reg) isArg() {} + +func (r Reg) String() string { + switch { + case r == WZR: + return "WZR" + case r == XZR: + return "XZR" + case W0 <= r && r <= W30: + return fmt.Sprintf("W%d", int(r-W0)) + case X0 <= r && r <= X30: + return fmt.Sprintf("X%d", int(r-X0)) + + case B0 <= r && r <= B31: + return fmt.Sprintf("B%d", int(r-B0)) + case H0 <= r && r <= H31: + return fmt.Sprintf("H%d", int(r-H0)) + case S0 <= r && r <= S31: + return fmt.Sprintf("S%d", int(r-S0)) + case D0 <= r && r <= D31: + return fmt.Sprintf("D%d", int(r-D0)) + case Q0 <= r && r <= Q31: + return fmt.Sprintf("Q%d", int(r-Q0)) + + case V0 <= r && r <= V31: + return fmt.Sprintf("V%d", int(r-V0)) + default: + return fmt.Sprintf("Reg(%d)", int(r)) + } +} + +// A RegSP represent a register and X31/W31 is regarded as SP/WSP. 
+type RegSP Reg + +func (RegSP) isArg() {} + +func (r RegSP) String() string { + switch Reg(r) { + case WSP: + return "WSP" + case SP: + return "SP" + default: + return Reg(r).String() + } +} + +type ImmShift struct { + imm uint16 + shift uint8 +} + +func (ImmShift) isArg() {} + +func (is ImmShift) String() string { + if is.shift == 0 { + return fmt.Sprintf("#%#x", is.imm) + } + if is.shift < 128 { + return fmt.Sprintf("#%#x, LSL #%d", is.imm, is.shift) + } + return fmt.Sprintf("#%#x, MSL #%d", is.imm, is.shift-128) +} + +type ExtShift uint8 + +const ( + _ ExtShift = iota + uxtb + uxth + uxtw + uxtx + sxtb + sxth + sxtw + sxtx + lsl + lsr + asr + ror +) + +func (extShift ExtShift) String() string { + switch extShift { + case uxtb: + return "UXTB" + + case uxth: + return "UXTH" + + case uxtw: + return "UXTW" + + case uxtx: + return "UXTX" + + case sxtb: + return "SXTB" + + case sxth: + return "SXTH" + + case sxtw: + return "SXTW" + + case sxtx: + return "SXTX" + + case lsl: + return "LSL" + + case lsr: + return "LSR" + + case asr: + return "ASR" + + case ror: + return "ROR" + } + return "" +} + +type RegExtshiftAmount struct { + reg Reg + extShift ExtShift + amount uint8 + show_zero bool +} + +func (RegExtshiftAmount) isArg() {} + +func (rea RegExtshiftAmount) String() string { + buf := rea.reg.String() + if rea.extShift != ExtShift(0) { + buf += ", " + rea.extShift.String() + if rea.amount != 0 { + buf += fmt.Sprintf(" #%d", rea.amount) + } else { + if rea.show_zero == true { + buf += fmt.Sprintf(" #%d", rea.amount) + } + } + } + return buf +} + +// A PCRel describes a memory address (usually a code label) +// as a distance relative to the program counter. +type PCRel int64 + +func (PCRel) isArg() {} + +func (r PCRel) String() string { + return fmt.Sprintf(".%+#x", uint64(r)) +} + +// An AddrMode is an ARM addressing mode. +type AddrMode uint8 + +const ( + _ AddrMode = iota + AddrPostIndex // [R], X - use address R, set R = R + X + AddrPreIndex // [R, X]! 
- use address R + X, set R = R + X + AddrOffset // [R, X] - use address R + X + AddrPostReg // [Rn], Rm - - use address Rn, set Rn = Rn + Rm +) + +// A MemImmediate is a memory reference made up of a base R and immediate X. +// The effective memory address is R or R+X depending on AddrMode. +type MemImmediate struct { + Base RegSP + Mode AddrMode + imm int32 +} + +func (MemImmediate) isArg() {} + +func (m MemImmediate) String() string { + R := m.Base.String() + X := fmt.Sprintf("#%d", m.imm) + + switch m.Mode { + case AddrOffset: + if X == "#0" { + return fmt.Sprintf("[%s]", R) + } + return fmt.Sprintf("[%s,%s]", R, X) + case AddrPreIndex: + return fmt.Sprintf("[%s,%s]!", R, X) + case AddrPostIndex: + return fmt.Sprintf("[%s],%s", R, X) + case AddrPostReg: + post := Reg(X0) + Reg(m.imm) + postR := post.String() + return fmt.Sprintf("[%s], %s", R, postR) + } + return fmt.Sprintf("unimplemented!") +} + +// A MemExtend is a memory reference made up of a base R and index expression X. +// The effective memory address is R or R+X depending on Index, Extend and Amount. +type MemExtend struct { + Base RegSP + Index Reg + Extend ExtShift + // Amount indicates the index shift amount (but also see ShiftMustBeZero field below). + Amount uint8 + // ShiftMustBeZero is set to true when the shift amount must be 0, even if the + // Amount field is not 0. In GNU syntax, a #0 shift amount is printed if Amount + // is not 0 but ShiftMustBeZero is true; #0 is not printed if Amount is 0 and + // ShiftMustBeZero is true. Both cases represent shift by 0 bit. 
+ ShiftMustBeZero bool +} + +func (MemExtend) isArg() {} + +func (m MemExtend) String() string { + Rbase := m.Base.String() + RIndex := m.Index.String() + if m.ShiftMustBeZero { + if m.Amount != 0 { + return fmt.Sprintf("[%s,%s,%s #0]", Rbase, RIndex, m.Extend.String()) + } else { + if m.Extend != lsl { + return fmt.Sprintf("[%s,%s,%s]", Rbase, RIndex, m.Extend.String()) + } else { + return fmt.Sprintf("[%s,%s]", Rbase, RIndex) + } + } + } else { + if m.Amount != 0 { + return fmt.Sprintf("[%s,%s,%s #%d]", Rbase, RIndex, m.Extend.String(), m.Amount) + } else { + if m.Extend != lsl { + return fmt.Sprintf("[%s,%s,%s]", Rbase, RIndex, m.Extend.String()) + } else { + return fmt.Sprintf("[%s,%s]", Rbase, RIndex) + } + } + } +} + +// An Imm is an integer constant. +type Imm struct { + Imm uint32 + Decimal bool +} + +func (Imm) isArg() {} + +func (i Imm) String() string { + if !i.Decimal { + return fmt.Sprintf("#%#x", i.Imm) + } else { + return fmt.Sprintf("#%d", i.Imm) + } +} + +type Imm64 struct { + Imm uint64 + Decimal bool +} + +func (Imm64) isArg() {} + +func (i Imm64) String() string { + if !i.Decimal { + return fmt.Sprintf("#%#x", i.Imm) + } else { + return fmt.Sprintf("#%d", i.Imm) + } +} + +// An Imm_hint is an integer constant for HINT instruction. +type Imm_hint uint8 + +func (Imm_hint) isArg() {} + +func (i Imm_hint) String() string { + return fmt.Sprintf("#%#x", uint32(i)) +} + +// An Imm_clrex is an integer constant for CLREX instruction. +type Imm_clrex uint8 + +func (Imm_clrex) isArg() {} + +func (i Imm_clrex) String() string { + if i == 15 { + return "" + } + return fmt.Sprintf("#%#x", uint32(i)) +} + +// An Imm_dcps is an integer constant for DCPS[123] instruction. +type Imm_dcps uint16 + +func (Imm_dcps) isArg() {} + +func (i Imm_dcps) String() string { + if i == 0 { + return "" + } + return fmt.Sprintf("#%#x", uint32(i)) +} + +// Standard conditions. 
+type Cond struct { + Value uint8 + Invert bool +} + +func (Cond) isArg() {} + +func (c Cond) String() string { + cond31 := c.Value >> 1 + invert := bool((c.Value & 1) == 1) + invert = (invert != c.Invert) + switch cond31 { + case 0: + if invert { + return "NE" + } else { + return "EQ" + } + case 1: + if invert { + return "CC" + } else { + return "CS" + } + case 2: + if invert { + return "PL" + } else { + return "MI" + } + case 3: + if invert { + return "VC" + } else { + return "VS" + } + case 4: + if invert { + return "LS" + } else { + return "HI" + } + case 5: + if invert { + return "LT" + } else { + return "GE" + } + case 6: + if invert { + return "LE" + } else { + return "GT" + } + case 7: + return "AL" + } + return "" +} + +// An Imm_c is an integer constant for SYS/SYSL/TLBI instruction. +type Imm_c uint8 + +func (Imm_c) isArg() {} + +func (i Imm_c) String() string { + return fmt.Sprintf("C%d", uint8(i)) +} + +// An Imm_option is an integer constant for DMB/DSB/ISB instruction. +type Imm_option uint8 + +func (Imm_option) isArg() {} + +func (i Imm_option) String() string { + switch uint8(i) { + case 15: + return "SY" + case 14: + return "ST" + case 13: + return "LD" + case 11: + return "ISH" + case 10: + return "ISHST" + case 9: + return "ISHLD" + case 7: + return "NSH" + case 6: + return "NSHST" + case 5: + return "NSHLD" + case 3: + return "OSH" + case 2: + return "OSHST" + case 1: + return "OSHLD" + } + return fmt.Sprintf("#%#02x", uint8(i)) +} + +// An Imm_prfop is an integer constant for PRFM instruction. 
+type Imm_prfop uint8 + +func (Imm_prfop) isArg() {} + +func (i Imm_prfop) String() string { + prf_type := (i >> 3) & (1<<2 - 1) + prf_target := (i >> 1) & (1<<2 - 1) + prf_policy := i & 1 + var result string + + switch prf_type { + case 0: + result = "PLD" + case 1: + result = "PLI" + case 2: + result = "PST" + case 3: + return fmt.Sprintf("#%#02x", uint8(i)) + } + switch prf_target { + case 0: + result += "L1" + case 1: + result += "L2" + case 2: + result += "L3" + case 3: + return fmt.Sprintf("#%#02x", uint8(i)) + } + if prf_policy == 0 { + result += "KEEP" + } else { + result += "STRM" + } + return result +} + +type Pstatefield uint8 + +const ( + SPSel Pstatefield = iota + DAIFSet + DAIFClr +) + +func (Pstatefield) isArg() {} + +func (p Pstatefield) String() string { + switch p { + case SPSel: + return "SPSel" + case DAIFSet: + return "DAIFSet" + case DAIFClr: + return "DAIFClr" + default: + return "unimplemented" + } +} + +type Systemreg struct { + op0 uint8 + op1 uint8 + cn uint8 + cm uint8 + op2 uint8 +} + +func (Systemreg) isArg() {} + +func (s Systemreg) String() string { + return fmt.Sprintf("S%d_%d_C%d_C%d_%d", + s.op0, s.op1, s.cn, s.cm, s.op2) +} + +// An Imm_fp is a signed floating-point constant. 
+type Imm_fp struct { + s uint8 + exp int8 + pre uint8 +} + +func (Imm_fp) isArg() {} + +func (i Imm_fp) String() string { + var s, pre, numerator, denominator int16 + var result float64 + if i.s == 0 { + s = 1 + } else { + s = -1 + } + pre = s * int16(16+i.pre) + if i.exp > 0 { + numerator = (pre << uint8(i.exp)) + denominator = 16 + } else { + numerator = pre + denominator = (16 << uint8(-1*i.exp)) + } + result = float64(numerator) / float64(denominator) + return fmt.Sprintf("#%.18e", result) +} + +type Arrangement uint8 + +const ( + _ Arrangement = iota + ArrangementB + Arrangement8B + Arrangement16B + ArrangementH + Arrangement4H + Arrangement8H + ArrangementS + Arrangement2S + Arrangement4S + ArrangementD + Arrangement1D + Arrangement2D + Arrangement1Q +) + +func (a Arrangement) String() (result string) { + switch a { + case ArrangementB: + result = ".B" + case Arrangement8B: + result = ".8B" + case Arrangement16B: + result = ".16B" + case ArrangementH: + result = ".H" + case Arrangement4H: + result = ".4H" + case Arrangement8H: + result = ".8H" + case ArrangementS: + result = ".S" + case Arrangement2S: + result = ".2S" + case Arrangement4S: + result = ".4S" + case ArrangementD: + result = ".D" + case Arrangement1D: + result = ".1D" + case Arrangement2D: + result = ".2D" + case Arrangement1Q: + result = ".1Q" + } + return +} + +// Register with arrangement: ., { .8B, .8B}, +type RegisterWithArrangement struct { + r Reg + a Arrangement + cnt uint8 +} + +func (RegisterWithArrangement) isArg() {} + +func (r RegisterWithArrangement) String() string { + result := r.r.String() + result += r.a.String() + if r.cnt > 0 { + result = "{" + result + if r.cnt == 2 { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+1)&31) + result += ", " + r1.String() + r.a.String() + } else if r.cnt > 2 { + if (uint16(r.cnt) + ((uint16(r.r) - uint16(V0)) & 31)) > 32 { + for i := 1; i < int(r.cnt); i++ { + cur := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(i))&31) + result += ", " + cur.String() + 
r.a.String() + } + } else { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(r.cnt)-1)&31) + result += "-" + r1.String() + r.a.String() + } + } + result += "}" + } + return result +} + +// Register with arrangement and index: .[], +// { .B, .B }[]. +type RegisterWithArrangementAndIndex struct { + r Reg + a Arrangement + index uint8 + cnt uint8 +} + +func (RegisterWithArrangementAndIndex) isArg() {} + +func (r RegisterWithArrangementAndIndex) String() string { + result := r.r.String() + result += r.a.String() + if r.cnt > 0 { + result = "{" + result + if r.cnt == 2 { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+1)&31) + result += ", " + r1.String() + r.a.String() + } else if r.cnt > 2 { + if (uint16(r.cnt) + ((uint16(r.r) - uint16(V0)) & 31)) > 32 { + for i := 1; i < int(r.cnt); i++ { + cur := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(i))&31) + result += ", " + cur.String() + r.a.String() + } + } else { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(r.cnt)-1)&31) + result += "-" + r1.String() + r.a.String() + } + } + result += "}" + } + return fmt.Sprintf("%s[%d]", result, r.index) +} diff --git a/vendor/golang.org/x/arch/arm64/arm64asm/inst.json b/vendor/golang.org/x/arch/arm64/arm64asm/inst.json new file mode 100755 index 00000000..2d25c944 --- /dev/null +++ b/vendor/golang.org/x/arch/arm64/arm64asm/inst.json @@ -0,0 +1,1219 @@ +[{"Name":"ADC","Bits":"0|0|0|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADC , , ","Code":"","Alias":""}, +{"Name":"ADC","Bits":"1|0|0|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADC , , ","Code":"","Alias":""}, +{"Name":"ADCS","Bits":"0|0|1|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADCS , , ","Code":"","Alias":""}, +{"Name":"ADCS","Bits":"1|0|1|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADCS , , ","Code":"","Alias":""}, +{"Name":"ADD (extended 
register)","Bits":"0|0|0|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , {, {#}}","Code":"","Alias":""}, +{"Name":"ADD (extended register)","Bits":"1|0|0|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , {, {#}}","Code":"","Alias":""}, +{"Name":"ADD (immediate)","Bits":"0|0|0|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , #{, }","Code":"","Alias":"This instruction is used by the alias MOV (to/from SP)."}, +{"Name":"ADD (immediate)","Bits":"1|0|0|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , #{, }","Code":"","Alias":"This instruction is used by the alias MOV (to/from SP)."}, +{"Name":"ADD (shifted register)","Bits":"0|0|0|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , {, #}","Code":"","Alias":""}, +{"Name":"ADD (shifted register)","Bits":"1|0|0|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , {, #}","Code":"","Alias":""}, +{"Name":"ADDS (extended register)","Bits":"0|0|1|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , {, {#}}","Code":"","Alias":"This instruction is used by the alias CMN (extended register)."}, +{"Name":"ADDS (extended register)","Bits":"1|0|1|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , {, {#}}","Code":"","Alias":"This instruction is used by the alias CMN (extended register)."}, +{"Name":"ADDS (immediate)","Bits":"0|0|1|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , #{, }","Code":"","Alias":"This instruction is used by the alias CMN (immediate)."}, +{"Name":"ADDS (immediate)","Bits":"1|0|1|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , #{, }","Code":"","Alias":"This instruction is used by the alias CMN (immediate)."}, +{"Name":"ADDS (shifted 
register)","Bits":"0|0|1|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , {, #}","Code":"","Alias":"This instruction is used by the alias CMN (shifted register)."}, +{"Name":"ADDS (shifted register)","Bits":"1|0|1|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , {, #}","Code":"","Alias":"This instruction is used by the alias CMN (shifted register)."}, +{"Name":"ADR","Bits":"0|immlo:2|1|0|0|0|0|immhi:19|Rd:5","Arch":"Literal variant","Syntax":"ADR ,