[dev.ssa] cmd/compile: ensure alignment for Zero and Move in SSA for ARM
Encode the size and the alignment into AuxInt of Zero and Move ops.
On AMD64, we simply don't look at the alignment. On ARM and PPC64, we
only generate aligned stores.

Updates #15365.

Change-Id: Ifdcc205c364f67c4516b9adebfe7d50d223b6863
Reviewed-on: https://go-review.googlesource.com/24511
Reviewed-by: David Chase <[email protected]>
Reviewed-by: Keith Randall <[email protected]>
Run-TryBot: Cherry Zhang <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
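For context, the heart of this change is packing two numbers into the single AuxInt field that Zero and Move ops carry. Below is a minimal, self-contained sketch of how such a packing can work; the names and the exact bit layout (size in the low 56 bits, alignment in the high 8) are illustrative assumptions, not necessarily the layout the compiler uses.

	package main

	import "fmt"

	// sizeAndAlign packs a size and an alignment into one int64,
	// so both survive in a single AuxInt field. (Hypothetical layout.)
	type sizeAndAlign int64

	func makeSizeAndAlign(size, align int64) sizeAndAlign {
		if size&^(1<<56-1) != 0 {
			panic("size too big")
		}
		if align >= 1<<8 {
			panic("alignment too big")
		}
		return sizeAndAlign(size | align<<56)
	}

	func (x sizeAndAlign) Size() int64  { return int64(x) & (1<<56 - 1) }
	func (x sizeAndAlign) Align() int64 { return int64(uint64(x) >> 56) }

	func main() {
		sa := makeSizeAndAlign(24, 8)      // e.g. a 24-byte, 8-byte-aligned struct
		fmt.Println(sa.Size(), sa.Align()) // 24 8
	}

On AMD64 the rewrite rules below read only the Size() half; the ARM lowering also consults the alignment to decide between word and byte stores.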
cherrymui committed Jul 2, 2016
1 parent 95427d2 commit f553178
Showing 14 changed files with 957 additions and 456 deletions.
32 changes: 22 additions & 10 deletions src/cmd/compile/internal/arm/ssa.go
@@ -547,15 +547,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
return
}
-case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero:
+case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero, ssa.OpARMLoweredZeroU:
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
-case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove:
+case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove, ssa.OpARMLoweredMoveU:
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
@@ -585,46 +585,58 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check")
}
-case ssa.OpARMLoweredZero:
+case ssa.OpARMLoweredZero, ssa.OpARMLoweredZeroU:
// MOVW.P Rarg2, 4(R1)
// CMP Rarg1, R1
// BLT -2(PC)
// arg1 is the end of memory to zero
// arg2 is known to be zero
-p := gc.Prog(arm.AMOVW)
+var sz int64 = 4
+mov := arm.AMOVW
+if v.Op == ssa.OpARMLoweredZeroU { // unaligned
+	sz = 1
+	mov = arm.AMOVB
+}
+p := gc.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[2])
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1
-p.To.Offset = 4
+p.To.Offset = sz
p2 := gc.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = gc.SSARegNum(v.Args[1])
p2.Reg = arm.REG_R1
p3 := gc.Prog(arm.ABLT)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
-case ssa.OpARMLoweredMove:
+case ssa.OpARMLoweredMove, ssa.OpARMLoweredMoveU:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
// CMP Rarg2, R1
// BLT -3(PC)
// arg2 is the end of src
-p := gc.Prog(arm.AMOVW)
+var sz int64 = 4
+mov := arm.AMOVW
+if v.Op == ssa.OpARMLoweredMoveU { // unaligned
+	sz = 1
+	mov = arm.AMOVB
+}
+p := gc.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1
-p.From.Offset = 4
+p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
-p2 := gc.Prog(arm.AMOVW)
+p2 := gc.Prog(mov)
p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2
-p2.To.Offset = 4
+p2.To.Offset = sz
p3 := gc.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = gc.SSARegNum(v.Args[2])
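The comments in the lowering above describe a store loop with a post-incremented pointer. A rough Go rendering of the emitted loop's semantics, with hypothetical names (the real output is the assembly shown in the diff):

	// zeroRange zeroes [ptr, end) in steps of step bytes.
	// step is 4 (MOVW.P) for the aligned op, 1 (MOVB.P) for LoweredZeroU.
	func zeroRange(mem []byte, ptr, end, step int) {
		for ptr < end {
			for i := 0; i < step; i++ {
				mem[ptr+i] = 0 // one MOVW.P or MOVB.P store
			}
			ptr += step // the .P (post-increment) suffix advances the pointer
		}
	}

The unaligned variants trade a loop that runs four times as long for correctness when the pointer or size is not word-aligned, since the ARM word stores generated here assume 4-byte alignment.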
15 changes: 10 additions & 5 deletions src/cmd/compile/internal/gc/ssa.go
@@ -2254,7 +2254,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if haspointers(et) {
s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
} else {
-s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
+s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(et), addr, arg.v, s.mem())
}
}
}
@@ -2387,14 +2387,14 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32,
if deref {
// Treat as a mem->mem move.
if right == nil {
-s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
+s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, SizeAlignAuxInt(t), addr, s.mem())
return
}
if wb {
s.insertWBmove(t, addr, right, line, rightIsVolatile)
return
}
-s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
+s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), addr, right, s.mem())
return
}
// Treat as a store.
@@ -3080,7 +3080,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
tmp := temp(t)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
tmpaddr, _ := s.addr(tmp, true)
-s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
+s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), tmpaddr, right, s.mem())
// Issue typedmemmove call.
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
@@ -3090,7 +3090,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
s.endBlock().AddEdgeTo(bEnd)

s.startBlock(bElse)
-s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
+s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), left, right, s.mem())
s.endBlock().AddEdgeTo(bEnd)

s.startBlock(bEnd)
@@ -4190,6 +4190,11 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
}
}

+// SizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t.
+func SizeAlignAuxInt(t *Type) int64 {
+	return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
+}

// extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/ssa/deadstore.go
@@ -89,7 +89,7 @@ func dse(f *Func) {
} else {
// zero addr mem
sz := v.Args[0].Type.ElemType().Size()
-if v.AuxInt != sz {
+if SizeAndAlign(v.AuxInt).Size() != sz {
f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
v.AuxInt, sz, v.LongString())
}
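The dead-store check changes because AuxInt no longer holds a raw byte count: comparing it directly against the element size would misfire whenever the packed alignment bits are nonzero. A tiny demonstration in terms of the hypothetical encoding sketched earlier:

	package main

	import "fmt"

	func main() {
		aux := int64(8) | int64(8)<<56      // Zero of a size-8, align-8 type (hypothetical layout)
		fmt.Println(aux == 8)               // false: a raw comparison would misfire
		fmt.Println(aux&(1<<56-1) == 8)     // true: compare only the size field
	}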
95 changes: 55 additions & 40 deletions src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -302,39 +302,47 @@
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

// Lowering moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
-(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
-(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
-(Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem)
-(Move [3] dst src mem) ->
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstore dst (MOVQload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 -> (MOVOstore dst (MOVOload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [5] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstore [4] dst (MOVBload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [6] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [7] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstore [3] dst (MOVLload [3] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [size] dst src mem) && size > 8 && size < 16 ->
-(MOVQstore [size-8] dst (MOVQload [size-8] src mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 ->
+(MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem)
(MOVQstore dst (MOVQload src mem) mem))

// Adjust moves to be a multiple of 16 bytes.
-(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 <= 8 ->
-(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
+(Move [s] dst src mem)
+&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
+(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
+(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
+(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVQstore dst (MOVQload src mem) mem))
-(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 > 8 ->
-(Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
+(Move [s] dst src mem)
+&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
+(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
+(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
+(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVOstore dst (MOVOload src mem) mem))

// Medium copying uses a duff device.
-(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice ->
-(DUFFCOPY [14*(64-size/16)] dst src mem)
+(Move [s] dst src mem)
+&& SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0
+&& !config.noDuffDevice ->
+(DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
// MOVUPS (SI), X0
// ADDQ $16, SI
@@ -343,57 +351,64 @@
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
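A quick worked number for the guard above, using the constants from the comment: a 96-byte copy needs 96/16 = 6 blocks, so the rule enters duffcopy at byte offset 14*(64-6) = 812, skipping the 58 blocks it does not need.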

// Large copying uses REP MOVSQ.
-(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
-(REPMOVSQ dst src (MOVQconst [size/8]) mem)
+(Move [s] dst src mem) && (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 ->
+(REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)

// Lowering Zero instructions
-(Zero [0] _ mem) -> mem
-(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
-(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
-(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
-(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstoreconst [0] destptr mem)

-(Zero [3] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem))
-(Zero [5] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
-(Zero [6] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
-(Zero [7] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstoreconst [makeValAndOff(0,3)] destptr
(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
-(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
-(Zero [size-size%8] (ADDQconst destptr [size%8])
+(Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
+(Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (ADDQconst destptr [SizeAndAlign(s).Size()%8])
(MOVQstoreconst [0] destptr mem))

// Zero small numbers of words directly.
-(Zero [16] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))
-(Zero [24] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 ->
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)))
-(Zero [32] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 ->
(MOVQstoreconst [makeValAndOff(0,24)] destptr
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))))

// Medium zeroing uses a duff device.
-(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
-(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
-(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
-(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
+(Zero [s] destptr mem)
+&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
+&& !config.noDuffDevice ->
+(Zero [SizeAndAlign(s).Size()-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+(Zero [s] destptr mem)
+&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
+(DUFFZERO [duffStart(SizeAndAlign(s).Size())]
+(ADDQconst [duffAdj(SizeAndAlign(s).Size())] destptr) (MOVOconst [0])
+mem)

// Large zeroing uses REP STOSQ.
-(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
-(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
+(Zero [s] destptr mem)
+&& (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32))
+&& SizeAndAlign(s).Size()%8 == 0 ->
+(REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8 [val]) -> (MOVLconst [val])
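As a worked example of how these guards compose: a 36-byte Move has SizeAndAlign(s).Size()%16 == 4, which is <= 8, so the first adjustment rule emits one overlapping 8-byte MOVQ load/store pair at the front, advances both pointers by 4, and rewrites the op as a 32-byte Move; that residue then satisfies the duff-device guard (size >= 32, %16 == 0), yielding DUFFCOPY [14*(64-32/16)], i.e. an entry offset of 868.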
[Diffs for the remaining 10 changed files are not shown.]
