Socks stability improvements
mrThe committed May 29, 2022
1 parent 1594a2a commit 56c0193
Showing 5 changed files with 136 additions and 62 deletions.
2 changes: 1 addition & 1 deletion client/command/commands.go
@@ -2775,7 +2775,7 @@ func BindCommands(con *console.SliverConsoleClient) {
LongHelp: help.GetHelpFor([]string{consts.Socks5Str}),
Flags: func(f *grumble.Flags) {
f.Int("t", "timeout", defaultTimeout, "router timeout in seconds")
f.Int("i", "id", 0, "id of portfwd to remove")
f.Uint64("i", "id", 0, "id of portfwd to remove")
},
Run: func(ctx *grumble.Context) error {
con.Println()
2 changes: 1 addition & 1 deletion client/command/socks/socks-stop.go
@@ -29,7 +29,7 @@ import (

// SocksStopCmd - Stop an existing SOCKS5 proxy
func SocksStopCmd(ctx *grumble.Context, con *console.SliverConsoleClient) {
socksID := ctx.Flags.Int("id")
socksID := ctx.Flags.Uint64("id")
if socksID < 1 {
con.PrintErrorf("Must specify a valid socks5 id\n")
return
104 changes: 70 additions & 34 deletions client/core/socks.go
@@ -20,11 +20,10 @@ package core

import (
"context"
"fmt"
"io"
"log"
"net"
"sync"
"sync/atomic"
"time"

"github.com/bishopfox/sliver/protobuf/clientpb"
@@ -37,16 +36,16 @@ import (
var (
// SocksProxies - Struct instance that holds all the SOCKS proxies
SocksProxies = socksProxy{
tcpProxies: map[int]*SocksProxy{},
tcpProxies: map[uint64]*SocksProxy{},
mutex: &sync.RWMutex{},
}
SocksConnPool = sync.Map{}
SocksProxyID = 0
SocksProxyID = (uint64)(0)
)

// SocksProxyMeta - Metadata about a SOCKS proxy listener
type SocksProxyMeta struct {
ID int
ID uint64
SessionID string
BindAddr string
Username string
@@ -60,14 +59,28 @@ type TcpProxy struct {
Password string
BindAddr string
Listener net.Listener
stopChan bool
KeepAlivePeriod time.Duration
DialTimeout time.Duration
}

func (tcp *TcpProxy) Stop() error {
err := tcp.Listener.Close()

// Closing all connections in pool
SocksConnPool.Range(func(key, value interface{}) bool {
con := value.(net.Conn)

con.Close()

return err == nil
})

return err
}
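
The new Stop closes the listener and then walks the shared SocksConnPool, closing every tracked client socket so a stopped proxy does not leave half-open connections behind (removal from the pool is left to each connection's own deferred cleanup, shown further down). A minimal standalone sketch of that sync.Map drain pattern; connPool and closeAll are made-up names, and this version also deletes entries as it iterates:

package main

import (
	"log"
	"net"
	"sync"
)

// connPool maps tunnel IDs to open client connections.
var connPool sync.Map

// closeAll closes every pooled connection and empties the pool,
// continuing even if an individual Close fails.
func closeAll() {
	connPool.Range(func(key, value interface{}) bool {
		if conn, ok := value.(net.Conn); ok {
			conn.Close()
		}
		connPool.Delete(key)
		return true // keep iterating over the remaining entries
	})
}

func main() {
	client, server := net.Pipe() // in-memory connection pair for the demo
	defer server.Close()
	connPool.Store(uint64(1), client)
	closeAll()
	log.Println("connection pool drained")
}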

// SocksProxy - Tracks a SOCKS proxy and its underlying TCP proxy
type SocksProxy struct {
ID int
ID uint64
ChannelProxy *TcpProxy
}

@@ -83,14 +96,15 @@ func (p *SocksProxy) GetMetadata() *SocksProxyMeta {
}

type socksProxy struct {
tcpProxies map[int]*SocksProxy
tcpProxies map[uint64]*SocksProxy
mutex *sync.RWMutex
}

// Add - Add a TCP proxy instance
func (f *socksProxy) Add(tcpProxy *TcpProxy) *SocksProxy {
f.mutex.Lock()
defer f.mutex.Unlock()

Sockser := &SocksProxy{
ID: nextSocksProxyID(),
ChannelProxy: tcpProxy,
@@ -101,67 +115,75 @@ func (f *socksProxy) Add(tcpProxy *TcpProxy) *SocksProxy {
}

func (f *socksProxy) Start(tcpProxy *TcpProxy) error {
proxy, err := tcpProxy.Rpc.SocksProxy(context.Background())
ctx, cancel := context.WithCancel(context.Background())
proxy, err := tcpProxy.Rpc.SocksProxy(ctx)
defer cancel()

if err != nil {
return err
}
go func() {
for !tcpProxy.stopChan {
for {
FromImplantSequence := 0
p, err := proxy.Recv()
socksData, err := proxy.Recv()
if err != nil {
log.Printf("Failed to Recv from proxy, %s\n", err)
return
}

if v, ok := SocksConnPool.Load(p.TunnelID); ok {
n := v.(net.Conn)
if p.CloseConn {
n.Close()
SocksConnPool.Delete(p.TunnelID)
if v, ok := SocksConnPool.Load(socksData.TunnelID); ok {
conn := v.(net.Conn)

if socksData.CloseConn {
conn.Close()
SocksConnPool.Delete(socksData.TunnelID)
continue
}
log.Printf("[socks] agent to Server To (Client to User) Data Sequence %d , Data Size %d \n", FromImplantSequence, len(p.Data))
log.Printf("[socks] agent to Server To (Client to User) Data Sequence %d , Data Size %d \n", FromImplantSequence, len(socksData.Data))
//fmt.Printf("recv data len %d \n", len(p.Data))
_, err := n.Write(p.Data)
_, err := conn.Write(socksData.Data)
if err != nil {
log.Printf("Failed to write data to proxy connection, %s\n", err)
continue
}
FromImplantSequence++
}
}
}()
for !tcpProxy.stopChan {
l, err := tcpProxy.Listener.Accept()
for {
connection, err := tcpProxy.Listener.Accept()
if err != nil {
return err
log.Printf("Failed to accept new listener, probably already closed: %s\n", err)
break
}
rpcSocks, err := tcpProxy.Rpc.CreateSocks(context.Background(), &sliverpb.Socks{
SessionID: tcpProxy.Session.ID,
})
if err != nil {
fmt.Println(err)
return err
log.Printf("Failed rcp call to create socks %s\n", err)
break
}

go connect(l, proxy, &sliverpb.SocksData{
go connect(connection, proxy, &sliverpb.SocksData{
Username: tcpProxy.Username,
Password: tcpProxy.Password,
TunnelID: rpcSocks.TunnelID,
Request: &commonpb.Request{SessionID: rpcSocks.SessionID},
})
}
log.Printf("Socks Stop -> %s\n", tcpProxy.BindAddr)
tcpProxy.Listener.Close()
tcpProxy.Stop() // we're already stopping at this point, but close everything anyway
proxy.CloseSend()
return nil
}
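
Start now ties the SocksProxy stream to a cancellable context and exits its loops on error instead of polling the removed stopChan flag; the deferred cancel tears the stream down once the accept loop stops. A rough sketch of that receive-goroutine-plus-deferred-cancel shape, with a hypothetical frameStream interface standing in for the Sliver RPC types:

package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// frameStream is a stand-in for the client side of a server-streaming RPC.
type frameStream interface {
	Recv() ([]byte, error)
}

// serve receives frames in the background and stops everything when the
// accept loop (simulated here by waiting on ctx) finishes.
func serve(ctx context.Context, stream frameStream) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // tearing down the stream context when serve returns

	go func() {
		for {
			data, err := stream.Recv()
			if err != nil {
				log.Printf("recv failed, receiver exiting: %v", err)
				return
			}
			log.Printf("received %d bytes", len(data))
		}
	}()

	<-ctx.Done() // the real code runs Listener.Accept here until it errors
}

// fakeStream produces a few frames and then fails, like a closed stream.
type fakeStream struct{ left int }

func (f *fakeStream) Recv() ([]byte, error) {
	if f.left == 0 {
		return nil, errors.New("stream closed")
	}
	f.left--
	time.Sleep(5 * time.Millisecond)
	return []byte("frame"), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	serve(ctx, &fakeStream{left: 3})
}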

// Remove - Remove a TCP proxy instance
func (f *socksProxy) Remove(socksId int) bool {
func (f *socksProxy) Remove(socksId uint64) bool {
f.mutex.Lock()
defer f.mutex.Unlock()

if _, ok := f.tcpProxies[socksId]; ok {
f.tcpProxies[socksId].ChannelProxy.stopChan = true
f.tcpProxies[socksId].ChannelProxy.Stop()
delete(f.tcpProxies, socksId)
return true
}
@@ -179,9 +201,8 @@ func (f *socksProxy) List() []*SocksProxyMeta {
return socksProxy
}

func nextSocksProxyID() int {
SocksProxyID++
return SocksProxyID
func nextSocksProxyID() uint64 {
return atomic.AddUint64(&SocksProxyID, 1)
}
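
Proxy IDs are now handed out with atomic.AddUint64, so concurrent callers can no longer receive duplicate IDs the way the old unsynchronized SocksProxyID++ could. A tiny self-contained illustration (counter and nextID are illustrative names, not Sliver's):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var counter uint64

// nextID is safe to call from many goroutines at once.
func nextID() uint64 {
	return atomic.AddUint64(&counter, 1)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			nextID()
		}()
	}
	wg.Wait()
	// Always prints 100; a plain counter++ in nextID would be a data race.
	fmt.Println(atomic.LoadUint64(&counter))
}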

const leakyBufSize = 4108 // data.len(2) + hmacsha1(10) + data(4096)
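
leakyBuf itself is defined elsewhere in this package; the size comment above points at a shadowsocks-style leaky buffer pool, i.e. a bounded free list of fixed-size byte slices that allocates when empty and drops buffers when full. A minimal sketch of such a pool, assuming nothing about Sliver's actual implementation:

package main

import "fmt"

// leakyPool recycles fixed-size buffers through a bounded channel.
type leakyPool struct {
	bufSize  int
	freeList chan []byte
}

func newLeakyPool(n, bufSize int) *leakyPool {
	return &leakyPool{bufSize: bufSize, freeList: make(chan []byte, n)}
}

// Get returns a recycled buffer, or allocates one if the pool is empty.
func (p *leakyPool) Get() []byte {
	select {
	case b := <-p.freeList:
		return b
	default:
		return make([]byte, p.bufSize)
	}
}

// Put returns a buffer to the pool, or drops it for the GC when the pool is full.
func (p *leakyPool) Put(b []byte) {
	select {
	case p.freeList <- b:
	default: // pool full: let the buffer "leak" and be collected
	}
}

func main() {
	pool := newLeakyPool(8, 4108) // 4108 matches leakyBufSize above
	buf := pool.Get()
	defer pool.Put(buf)
	fmt.Println(len(buf))
}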
@@ -192,25 +213,40 @@ func connect(conn net.Conn, stream rpcpb.SliverRPC_SocksProxyClient, frame *sliv

SocksConnPool.Store(frame.TunnelID, conn)

defer func() {
// It's necessary to close and remove the connection once we're done with it
c, ok := SocksConnPool.LoadAndDelete(frame.TunnelID)
if !ok {
return
}
conn := c.(net.Conn)

conn.Close()

log.Printf("[socks] connection closed")
}()

log.Printf("tcp conn %q<--><-->%q \n", conn.LocalAddr(), conn.RemoteAddr())

buff := leakyBuf.Get()
defer leakyBuf.Put(buff)
var ToImplantSequence uint64 = 0
for {
n, err := conn.Read(buff)

if err != nil {
if err == io.EOF {
return
}
continue
log.Printf("[socks] (User to Client) failed to read data, %s ", err)
// An error basically means the connection is closed (EOF) or the deadline was exceeded.
// In either case, it's better to just give up.
return
}
if n > 0 {
frame.Data = buff[:n]
frame.Sequence = ToImplantSequence
log.Printf("[socks] (User to Client) to Server to agent Data Sequence %d , Data Size %d \n", ToImplantSequence, len(frame.Data))
err := stream.Send(frame)
if err != nil {
log.Printf("[socks] (User to Client) failed to send data, %s ", err)
return
}
ToImplantSequence++
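
The remainder of connect (truncated here) keeps reading from the user-facing connection and forwarding each chunk to the implant with an increasing sequence number, now giving up on any read error instead of spinning on non-EOF errors; the deferred LoadAndDelete above guarantees the pooled connection is closed on the way out. The general shape of that loop, reduced to a generic helper with made-up names (pump, sender):

package main

import (
	"bytes"
	"io"
	"log"
)

// sender is a stand-in for the client->server side of the proxy stream.
type sender interface {
	Send(data []byte, seq uint64) error
}

// pump reads from conn and forwards each chunk with a monotonically
// increasing sequence number, stopping on the first read or send error.
func pump(conn io.Reader, out sender) {
	buf := make([]byte, 4096)
	var seq uint64
	for {
		n, err := conn.Read(buf)
		if n > 0 {
			if err := out.Send(buf[:n], seq); err != nil {
				log.Printf("send failed: %v", err)
				return
			}
			seq++
		}
		if err != nil {
			if err != io.EOF {
				log.Printf("read failed: %v", err)
			}
			return // EOF or any other error: give up
		}
	}
}

type logSender struct{}

func (logSender) Send(data []byte, seq uint64) error {
	log.Printf("seq=%d size=%d", seq, len(data))
	return nil
}

func main() {
	pump(bytes.NewReader([]byte("hello socks")), logSender{})
}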
2 changes: 2 additions & 0 deletions server/core/tunnels.go
@@ -129,6 +129,8 @@ func (t *tunnels) Create(sessionID string) *Tunnel {
// There is no other way around it if we want to stick to async processing as we do now.
// Any further improvement requires changes on the implant side (like sequencing for close messages),
// and since the goal is to keep compatibility we don't do that at the moment.
// So there is a trade-off: more stability or more speed. Or rewriting the implant logic.
// At the moment this only affects the `shell` command, locking it for 10 seconds on exit. Not a big deal.
func (t *tunnels) ScheduleClose(tunnelID uint64) {
tunnel := t.Get(tunnelID)
if tunnel == nil {
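
The comment above explains why the server delays tunnel teardown instead of closing immediately: without close-message sequencing on the implant side, an early close can drop in-flight data. ScheduleClose's body is not shown in this hunk; a hypothetical delayed-close helper in the same spirit could look like this:

package main

import (
	"log"
	"time"
)

// scheduleClose closes a tunnel after a grace period so that any
// in-flight, out-of-order data still has a chance to be delivered.
func scheduleClose(tunnelID uint64, grace time.Duration, closeFn func(uint64)) *time.Timer {
	return time.AfterFunc(grace, func() {
		log.Printf("closing tunnel %d after %s grace period", tunnelID, grace)
		closeFn(tunnelID)
	})
}

func main() {
	done := make(chan struct{})
	scheduleClose(42, 100*time.Millisecond, func(id uint64) {
		log.Printf("tunnel %d closed", id)
		close(done)
	})
	<-done
}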