[dev.ssa] cmd/compile: implement Defer, RetJmp on SSA for ARM
Also fix argument offset for runtime calls.

Also fix LoadReg/StoreReg by generating instructions by type.

Progress on SSA backend for ARM. Still not complete.
Tests append_ssa.go, assert_ssa.go, loadstore_ssa.go, short_ssa.go, and
deferNoReturn.go in cmd/compile/internal/gc/testdata passed.

Updates #15365.

Change-Id: I0f0a2398cab8bbb461772a55241a16a7da2ecedf
Reviewed-on: https://go-review.googlesource.com/23212
Reviewed-by: David Chase <[email protected]>
cherrymui committed May 27, 2016
1 parent 8357ec3 commit d108bc0
Showing 2 changed files with 91 additions and 8 deletions.
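
The LoadReg/StoreReg fix in the first file replaces a hard-coded MOVW with an instruction chosen from the value's size and signedness. As a minimal runnable sketch of that selection, with a hypothetical typ struct standing in for ssa.Type (the real helpers, loadByType and storeByType, are in the diff below):

package main

import "fmt"

// typ is a simplified stand-in for ssa.Type, for illustration only.
type typ struct {
	size   int64
	signed bool
}

// loadOp mirrors the selection loadByType performs: narrow signed
// loads sign-extend (MOVB/MOVH), narrow unsigned loads zero-extend
// (MOVBU/MOVHU), and word-sized loads use MOVW.
func loadOp(t typ) string {
	switch t.size {
	case 1:
		if t.signed {
			return "MOVB"
		}
		return "MOVBU"
	case 2:
		if t.signed {
			return "MOVH"
		}
		return "MOVHU"
	case 4:
		return "MOVW"
	}
	panic("bad load type")
}

func main() {
	fmt.Println(loadOp(typ{size: 1, signed: false})) // MOVBU: zero-extending byte load
	fmt.Println(loadOp(typ{size: 2, signed: true}))  // MOVH: sign-extending halfword load
	fmt.Println(loadOp(typ{size: 4, signed: true}))  // MOVW: full word load
}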
src/cmd/compile/internal/arm/ssa.go (86 additions, 5 deletions)
@@ -33,6 +33,48 @@ var ssaRegToReg = []int16{
 	0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
 }
 
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+	if t.IsFloat() {
+		panic("load floating point register is not implemented")
+	} else {
+		switch t.Size() {
+		case 1:
+			if t.IsSigned() {
+				return arm.AMOVB
+			} else {
+				return arm.AMOVBU
+			}
+		case 2:
+			if t.IsSigned() {
+				return arm.AMOVH
+			} else {
+				return arm.AMOVHU
+			}
+		case 4:
+			return arm.AMOVW
+		}
+	}
+	panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+	if t.IsFloat() {
+		panic("store floating point register is not implemented")
+	} else {
+		switch t.Size() {
+		case 1:
+			return arm.AMOVB
+		case 2:
+			return arm.AMOVH
+		case 4:
+			return arm.AMOVW
+		}
+	}
+	panic("bad store type")
+}
+
 func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	s.SetLineno(v.Line)
 	switch v.Op {
@@ -57,8 +99,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = y
 	case ssa.OpLoadReg:
-		// TODO: by type
-		p := gc.Prog(arm.AMOVW)
+		if v.Type.IsFlags() {
+			v.Unimplementedf("load flags not implemented: %v", v.LongString())
+			return
+		}
+		p := gc.Prog(loadByType(v.Type))
 		n, off := gc.AutoVar(v.Args[0])
 		p.From.Type = obj.TYPE_MEM
 		p.From.Node = n
@@ -85,8 +130,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			}
 		}
 	case ssa.OpStoreReg:
-		// TODO: by type
-		p := gc.Prog(arm.AMOVW)
+		if v.Type.IsFlags() {
+			v.Unimplementedf("store flags not implemented: %v", v.LongString())
+			return
+		}
+		p := gc.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = gc.SSARegNum(v.Args[0])
 		n, off := gc.AutoVar(v)
@@ -284,7 +332,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = gc.SSARegNum(v)
 	case ssa.OpARMCALLstatic:
-		// TODO: deferreturn
+		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+			// Deferred calls will appear to be returning to
+			// the CALL deferreturn(SB) that we are about to emit.
+			// However, the stack trace code will show the line
+			// of the instruction byte before the return PC.
+			// To avoid that being an unrelated instruction,
+			// insert an actual hardware NOP that will have the right line number.
+			// This is different from obj.ANOP, which is a virtual no-op
+			// that doesn't make it into the instruction stream.
+			ginsnop()
+		}
 		p := gc.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
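
The ginsnop call above is worth a second look: a deferred call resumes at the CALL deferreturn instruction itself, so the traceback's line lookup uses the byte just before that call. A hedged, self-contained sketch of that lookup (instruction names, sizes, and line numbers are made up for illustration):

package main

import "fmt"

// inst is a hypothetical instruction record: mnemonic, source line,
// and size in bytes.
type inst struct {
	name string
	line int
	size int
}

// lineBefore returns the source line of the instruction containing
// the byte just before addr, which is effectively what tracebacks
// look up for a deferred call's return PC.
func lineBefore(code []inst, addr int) int {
	pc := 0
	for _, i := range code {
		if addr-1 < pc+i.size {
			return i.line
		}
		pc += i.size
	}
	return -1
}

func main() {
	withNop := []inst{
		{"ADD (unrelated stmt)", 10, 4},
		{"NOP (ginsnop)", 42, 4}, // hardware NOP carrying the call's line
		{"CALL deferreturn", 42, 4},
	}
	withoutNop := withNop[:1:1]
	withoutNop = append(withoutNop, withNop[2])

	// CALL deferreturn starts at byte 8 with the NOP, byte 4 without.
	fmt.Println(lineBefore(withNop, 8))    // 42: the NOP's (correct) line
	fmt.Println(lineBefore(withoutNop, 4)) // 10: an unrelated line
}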
@@ -467,12 +525,35 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
 			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
+	case ssa.BlockDefer:
+		// defer returns in R0:
+		// 0 if we should continue executing
+		// 1 if we should jump to deferreturn call
+		p := gc.Prog(arm.ACMP)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 0
+		p.Reg = arm.REG_R0
+		p = gc.Prog(arm.ABNE)
+		p.To.Type = obj.TYPE_BRANCH
+		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		if b.Succs[0].Block() != next {
+			p := gc.Prog(obj.AJMP)
+			p.To.Type = obj.TYPE_BRANCH
+			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+		}
+
 	case ssa.BlockExit:
 		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
 
 	case ssa.BlockRet:
 		gc.Prog(obj.ARET)
 
+	case ssa.BlockRetJmp:
+		p := gc.Prog(obj.AJMP)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
 	case ssa.BlockARMEQ, ssa.BlockARMNE,
 		ssa.BlockARMLT, ssa.BlockARMGE,
 		ssa.BlockARMLE, ssa.BlockARMGT,
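
For context, the BlockDefer case above encodes the check of deferproc's result: CMP $0, R0 followed by BNE to the deferreturn successor. A hedged Go rendering of the same control flow (names and the 0/1 convention follow the diff comment above; nothing here is taken verbatim from the runtime):

package main

import "fmt"

// run mimics the branch the BlockDefer lowering emits. r0 stands for
// the ARM R0 register as left by the runtime's deferproc.
func run(r0 int) {
	if r0 != 0 { // CMP $0, R0; BNE -> b.Succs[1] (deferreturn path)
		fmt.Println("unwinding: jump to deferreturn call")
		return
	}
	fmt.Println("continue with the function body") // fallthrough or JMP -> b.Succs[0]
}

func main() {
	run(0) // deferproc returned 0: normal execution
	run(1) // deferproc returned 1: jump straight to deferreturn
}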
src/cmd/compile/internal/gc/ssa.go (5 additions, 3 deletions)
@@ -2597,9 +2597,11 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
 	// Defer/go args
 	if k != callNormal {
 		// Write argsize and closure (args to Newproc/Deferproc).
+		argStart := Ctxt.FixedFrameSize()
 		argsize := s.constInt32(Types[TUINT32], int32(stksize))
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
-		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
+		addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
+		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem())
+		addr = s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp)
 		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
 		stksize += 2 * int64(Widthptr)
 	}
@@ -2956,7 +2958,7 @@ func (s *state) check(cmp *ssa.Value, fn *Node) {
 // is started to load the return values.
 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
 	// Write args to the stack
-	var off int64 // TODO: arch-dependent starting offset?
+	off := Ctxt.FixedFrameSize()
 	for _, arg := range args {
 		t := arg.Type
 		off = Rnd(off, t.Alignment())
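
Both gc/ssa.go changes exist because the starting offset for runtime-call arguments is architecture-dependent: on amd64 outgoing args start at 0(SP), while on a link-register machine like ARM the first word of the frame holds the saved return address, so args must start after it. A small runnable sketch of the offset computation, using assumed fixed-frame sizes (consistent with my understanding of the toolchain, not taken verbatim from it):

package main

import "fmt"

// rnd rounds off up to a multiple of align, like gc.Rnd.
func rnd(off, align int64) int64 {
	return (off + align - 1) &^ (align - 1)
}

func main() {
	// Assumed values: FixedFrameSize is 0 on amd64 and 4 on arm
	// (room for the saved LR at the bottom of the frame).
	for arch, fixed := range map[string]int64{"amd64": 0, "arm": 4} {
		off := fixed // before the fix: var off int64, i.e. always 0
		for _, arg := range []struct{ size, align int64 }{{1, 1}, {4, 4}} {
			off = rnd(off, arg.align)
			fmt.Printf("%s: arg at %d(SP)\n", arch, off)
			off += arg.size
		}
	}
}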
