runtime: de-duplicate span scavenging
Currently, span scavenging is done nearly identically in two different
locations. This change deduplicates that code into one shared routine.

For #14045.

Change-Id: I15006b2c9af0e70b7a9eae9abb4168d3adca3860
Reviewed-on: https://go-review.googlesource.com/c/139297
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
mknyszek committed Oct 17, 2018
1 parent de31f63 commit e508a5f
Showing 1 changed file with 32 additions and 52 deletions.
src/runtime/mheap.go: 32 additions & 52 deletions
@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
+func (s *mspan) scavenge() uintptr {
+	start := s.base()
+	end := start + s.npages<<_PageShift
+	if physPageSize > _PageSize {
+		// We can only release pages in
+		// physPageSize blocks, so round start
+		// and end in. (Otherwise, madvise
+		// will round them *out* and release
+		// more memory than we want.)
+		start = (start + physPageSize - 1) &^ (physPageSize - 1)
+		end &^= physPageSize - 1
+		if end <= start {
+			// start and end don't span a
+			// whole physical page.
+			return 0
+		}
+	}
+	len := end - start
+	released := len - (s.npreleased << _PageShift)
+	if physPageSize > _PageSize && released == 0 {
+		return 0
+	}
+	memstats.heap_released += uint64(released)
+	s.npreleased = len >> _PageShift
+	sysUnused(unsafe.Pointer(start), len)
+	return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	s := t.spanKey
-	var sumreleased uintptr
 	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				return sumreleased
-			}
+		if released := s.scavenge(); released != 0 {
+			return released
 		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			return sumreleased
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
 	}
-	return sumreleased
+	return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
 			continue
 		}
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				continue
-			}
-		}
-		len := end - start
-
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			continue
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		sumreleased += s.scavenge()
 	}
 	return sumreleased
 }
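
The subtle part of the new mspan.scavenge method is the round-in step: when physPageSize exceeds the runtime's logical page size, madvise would round a partially covered physical page *outward* and release memory beyond the span, so the code rounds start up and end down instead. Below is a standalone sketch of that arithmetic with a hypothetical 16 KiB physical page and made-up span bounds; none of the values are taken from the commit.

package main

import "fmt"

func main() {
	// Hypothetical values for illustration only: a 16 KiB physical
	// page and a span covering [0x5000, 0xd000).
	const physPageSize uintptr = 0x4000
	start := uintptr(0x5000)
	end := uintptr(0xd000)

	// Mirror the expressions in mspan.scavenge: round start up and
	// end down to physPageSize boundaries. This relies on
	// physPageSize being a power of two.
	alignedStart := (start + physPageSize - 1) &^ (physPageSize - 1)
	alignedEnd := end &^ (physPageSize - 1)

	fmt.Printf("start: %#x -> %#x\n", start, alignedStart) // 0x5000 -> 0x8000
	fmt.Printf("end:   %#x -> %#x\n", end, alignedEnd)     // 0xd000 -> 0xc000

	// If rounding in leaves an empty range, the span does not cover
	// a whole physical page and scavenge returns 0.
	if alignedEnd <= alignedStart {
		fmt.Println("nothing to release")
	}
}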
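
The shared routine also keeps the partial-release bookkeeping in one place: a span may already have had pages handed back, so only the bytes beyond s.npreleased count as newly released and are added to memstats.heap_released. A sketch of that accounting with invented numbers (pageShift stands in for the runtime's _PageShift; the span sizes are hypothetical):

package main

import "fmt"

func main() {
	// Hypothetical span: 8 logical pages, 3 of which were already
	// released by an earlier scavenge pass.
	const pageShift = 13 // 8 KiB logical pages
	npages := uintptr(8)
	npreleased := uintptr(3)

	length := npages << pageShift
	// Mirror `released := len - (s.npreleased << _PageShift)`:
	// only the not-yet-released portion is newly released.
	released := length - npreleased<<pageShift
	fmt.Printf("span bytes=%d, newly released=%d\n", length, released)

	// After sysUnused the whole range counts as released, so
	// npreleased is updated to cover it, as in
	// `s.npreleased = len >> _PageShift`.
	npreleased = length >> pageShift
	fmt.Println("npreleased now:", npreleased)
}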
