Fix race condition on closing tunnels
mrThe committed May 29, 2022
1 parent 2010b08 commit f7633bb
Showing 3 changed files with 90 additions and 13 deletions.
87 changes: 81 additions & 6 deletions server/core/tunnels.go
@@ -23,6 +23,7 @@ import (
"encoding/binary"
"errors"
"sync"
"time"

"github.com/bishopfox/sliver/protobuf/rpcpb"
"github.com/bishopfox/sliver/protobuf/sliverpb"
@@ -40,6 +41,12 @@
ErrInvalidTunnelID = errors.New("invalid tunnel ID")
)

const (
// delayBeforeClose - delay before closing the tunnel.
// I assume 10 seconds may be overkill for a good connection, but it looks good enough for a less stable one.
delayBeforeClose = 10 * time.Second
)

// Tunnel - Essentially just a mapping between a specific client and sliver
// with an identifier, these tunnels are full duplex. The server doesn't really
// care what data gets passed back and forth it just facilitates the connection
@@ -54,6 +61,43 @@ type Tunnel struct {
FromImplantSequence uint64

Client rpcpb.SliverRPC_TunnelDataServer

mutex *sync.RWMutex
lastDataMessageTime time.Time
}

func NewTunnel(id uint64, sessionID string) *Tunnel {
return &Tunnel{
ID: id,
SessionID: sessionID,
ToImplant: make(chan []byte),
FromImplant: make(chan *sliverpb.TunnelData),

mutex: &sync.RWMutex{},
lastDataMessageTime: time.Now(), // needs to be initialized
}
}

func (t *Tunnel) setLastMessageTime() {
t.mutex.Lock()
defer t.mutex.Unlock()

t.lastDataMessageTime = time.Now()
}

func (t *Tunnel) GetLastMessageTime() time.Time {
t.mutex.RLock()
defer t.mutex.RUnlock()

return t.lastDataMessageTime
}

func (t *Tunnel) SendDataFromImplant(tunnelData *sliverpb.TunnelData) {
// Set the timestamp right before and right after the message, since the channel can be blocked for some time
t.setLastMessageTime()
defer t.setLastMessageTime()

t.FromImplant <- tunnelData
}

type tunnels struct {
@@ -64,19 +108,50 @@ type tunnels struct {
func (t *tunnels) Create(sessionID string) *Tunnel {
tunnelID := NewTunnelID()
session := Sessions.Get(sessionID)
-tunnel := &Tunnel{
-ID: tunnelID,
-SessionID: session.ID,
-ToImplant: make(chan []byte),
-FromImplant: make(chan *sliverpb.TunnelData),
-}
+tunnel := NewTunnel(
+tunnelID,
+session.ID,
+)

t.mutex.Lock()
defer t.mutex.Unlock()
t.tunnels[tunnel.ID] = tunnel

return tunnel
}

// ScheduleClose - schedules a close for a tunnel; must be called as a goroutine.
// It will close the tunnel once there has been no data for at least delayBeforeClose since the last message.
// This is _necessary_ since we process messages asynchronously:
// if the tunnelCloseHandler routine fires before the tunnelDataHandler routine, we lose some data
// (this is what happens for socks and portfwd).
// There is no other way around it if we want to stick to async processing as we do now.
// Anything better requires changes on the implants (like sequencing for close messages),
// and as the goal is to keep compatibility, we don't do that at the moment.
func (t *tunnels) ScheduleClose(tunnelID uint64) {
tunnel := t.Get(tunnelID)
if tunnel == nil {
return
}

timeDelta := time.Since(tunnel.GetLastMessageTime())

coreLog.Printf("Scheduled close for tunnel %d (delta: %v)", tunnelID, timeDelta)

if timeDelta >= delayBeforeClose {
coreLog.Printf("Closing tunnel %d", tunnelID)
t.Close(tunnelID)
} else {
// Reschedule until the tunnel has been quiet for the full delay
coreLog.Printf("Rescheduling close for tunnel %d", tunnelID)
time.Sleep(delayBeforeClose - timeDelta + time.Second)
go t.ScheduleClose(tunnelID)
}
}

// Close - closes the tunnel.
// Prefer ScheduleClose unless you are 100% sure there is no more data left to receive.
func (t *tunnels) Close(tunnelID uint64) error {
t.mutex.Lock()
defer t.mutex.Unlock()
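The pattern introduced above is small enough to exercise in isolation. Below is a minimal, self-contained Go sketch of the same quiet-period close: a last-message timestamp guarded by an RWMutex, refreshed on both sides of a potentially blocking channel send, and a close loop that only fires once the tunnel has been idle for the full delay. All names here are illustrative stand-ins, not part of the Sliver codebase.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// quietPeriod stands in for delayBeforeClose from the commit.
const quietPeriod = 500 * time.Millisecond

type tunnel struct {
	mu      sync.RWMutex
	lastMsg time.Time
	data    chan string
	closed  chan struct{}
}

func newTunnel() *tunnel {
	return &tunnel{
		lastMsg: time.Now(), // must be initialized, like lastDataMessageTime
		data:    make(chan string),
		closed:  make(chan struct{}),
	}
}

func (t *tunnel) touch() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.lastMsg = time.Now()
}

func (t *tunnel) lastMessage() time.Time {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.lastMsg
}

// send refreshes the timestamp before and after the blocking channel
// send, mirroring what SendDataFromImplant does.
func (t *tunnel) send(s string) {
	t.touch()
	defer t.touch()
	t.data <- s
}

// scheduleClose keeps deferring the close while data is still flowing,
// like tunnels.ScheduleClose (minus the goroutine re-spawn).
func scheduleClose(t *tunnel) {
	for {
		delta := time.Since(t.lastMessage())
		if delta >= quietPeriod {
			close(t.closed)
			return
		}
		time.Sleep(quietPeriod - delta)
	}
}

func main() {
	t := newTunnel()
	go func() {
		for msg := range t.data {
			fmt.Println("got:", msg)
		}
	}()
	go scheduleClose(t) // scheduled immediately, but traffic keeps it alive
	for i := 0; i < 3; i++ {
		t.send(fmt.Sprintf("chunk %d", i))
		time.Sleep(100 * time.Millisecond)
	}
	<-t.closed
	fmt.Println("tunnel closed after quiet period")
}
```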
10 changes: 7 additions & 3 deletions server/handlers/sessions.go
@@ -110,15 +110,18 @@ func tunnelDataHandler(implantConn *core.ImplantConnection, data []byte) *sliver
defer tunnelHandlerMutex.Unlock()
tunnelData := &sliverpb.TunnelData{}
proto.Unmarshal(data, tunnelData)

sessionHandlerLog.Debugf("[DATA] Sequence on tunel %d, %d, data: %s", tunnelData.TunnelID, tunnelData.Sequence, tunnelData.Data)

tunnel := core.Tunnels.Get(tunnelData.TunnelID)
if tunnel != nil {
if session.ID == tunnel.SessionID {
-tunnel.FromImplant <- tunnelData
+tunnel.SendDataFromImplant(tunnelData)
} else {
sessionHandlerLog.Warnf("Warning: Session %s attempted to send data on tunnel it did not own", session.ID)
}
} else {
sessionHandlerLog.Warnf("Data sent on nil tunnel %d", tunnelData.TunnelID)
sessionHandlerLog.Warnf("Data sent on nil tunnel %d, %s", tunnelData.TunnelID, tunnelData.Data)
}
return nil
}
@@ -134,14 +137,15 @@ func tunnelCloseHandler(implantConn *core.ImplantConnection, data []byte) *slive

tunnelData := &sliverpb.TunnelData{}
proto.Unmarshal(data, tunnelData)
sessionHandlerLog.Debugf("[CLOSE] Sequence on tunel %d, %d, data: %s", tunnelData.TunnelID, tunnelData.Sequence, tunnelData.Data)
if !tunnelData.Closed {
return nil
}
tunnel := core.Tunnels.Get(tunnelData.TunnelID)
if tunnel != nil {
if session.ID == tunnel.SessionID {
sessionHandlerLog.Infof("Closing tunnel %d", tunnel.ID)
-core.Tunnels.Close(tunnel.ID)
+go core.Tunnels.ScheduleClose(tunnel.ID)
} else {
sessionHandlerLog.Warnf("Warning: Session %s attempted to send data on tunnel it did not own", session.ID)
}
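For context on why the close handler above hands off to ScheduleClose instead of closing directly: data and close envelopes are processed by separate goroutines, so a close can be handled while a data handler is still waiting to push into FromImplant — which is exactly how socks and portfwd were losing their final chunks. Here is a hedged, self-contained sketch of how a quiet-period consumer avoids that (toy names, not Sliver's API):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	fromImplant := make(chan string)

	// Analogue of a tunnelDataHandler goroutine whose delivery lags
	// behind the close envelope.
	go func() {
		time.Sleep(50 * time.Millisecond) // data envelope handled late
		select {
		case fromImplant <- "final chunk":
		case <-time.After(time.Second):
			fmt.Println("data dropped: tunnel already torn down")
		}
	}()

	// Consumer with a quiet-period close: instead of tearing down the
	// moment the close envelope arrives, it keeps draining until no
	// data has shown up for the delay (stand-in for delayBeforeClose).
	done := make(chan struct{})
	go func() {
		for {
			select {
			case msg := <-fromImplant:
				fmt.Println("got:", msg)
			case <-time.After(200 * time.Millisecond):
				close(done)
				return
			}
		}
	}()

	<-done
	fmt.Println("closed after quiet period; late data was not lost")
}
```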
6 changes: 2 additions & 4 deletions server/rpc/rpc-tunnel.go
@@ -108,12 +108,10 @@ func (s *Server) CreateTunnel(ctx context.Context, req *sliverpb.Tunnel) (*slive

// CloseTunnel - Client requests we close a tunnel
func (s *Server) CloseTunnel(ctx context.Context, req *sliverpb.Tunnel) (*commonpb.Empty, error) {
-err := core.Tunnels.Close(req.TunnelID)
+go core.Tunnels.ScheduleClose(req.TunnelID)
toImplantCache.DeleteTun(req.TunnelID)
fromImplantCache.DeleteTun(req.TunnelID)
-if err != nil {
-return nil, err
-}

return &commonpb.Empty{}, nil
}
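One behavioral consequence of the RPC change above is worth noting: because ScheduleClose is fired as a goroutine and returns nothing, CloseTunnel now always reports success, and a close failure can only surface in server logs. A toy sketch of that synchronous-versus-scheduled error visibility (illustrative code, not the Sliver API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New("tunnel not found")

// closeTunnel is a stand-in for tunnels.Close.
func closeTunnel(id uint64) error {
	if id == 0 {
		return errNotFound
	}
	fmt.Printf("closed tunnel %d\n", id)
	return nil
}

func main() {
	// Before: synchronous close, the RPC caller sees the error.
	if err := closeTunnel(0); err != nil {
		fmt.Println("caller sees:", err)
	}

	// After: fire-and-forget; the error is visible only server-side.
	go func(id uint64) {
		if err := closeTunnel(id); err != nil {
			fmt.Println("server log only:", err)
		}
	}(0)
	time.Sleep(50 * time.Millisecond) // give the goroutine time in this demo
}
```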
