sessionstore.go
/******************************************************************************
 *
 *  Description:
 *
 *  Session management.
 *
 *****************************************************************************/
package main

import (
	"container/list"
	"net/http"
	"sync"
	"time"

	"github.com/gorilla/websocket"

	"github.com/tinode/chat/pbx"
	"github.com/tinode/chat/server/logs"
	"github.com/tinode/chat/server/store"
	"github.com/tinode/chat/server/store/types"
)

// boundedWaitGroup is a WaitGroup with semaphore functionality: it limits the number of
// goroutines which may access the guarded resource simultaneously.
type boundedWaitGroup struct {
	wg  sync.WaitGroup
	sem chan struct{}
}

func newBoundedWaitGroup(capacity int) *boundedWaitGroup {
	return &boundedWaitGroup{sem: make(chan struct{}, capacity)}
}

func (w *boundedWaitGroup) Add(delta int) {
	if delta <= 0 {
		return
	}
	for i := 0; i < delta; i++ {
		w.sem <- struct{}{}
	}
	w.wg.Add(delta)
}

func (w *boundedWaitGroup) Done() {
	select {
	case _, ok := <-w.sem:
		if !ok {
			logs.Err.Panicln("boundedWaitGroup.sem closed.")
		}
	default:
		logs.Err.Panicln("boundedWaitGroup.Done() called before Add().")
	}
	w.wg.Done()
}

func (w *boundedWaitGroup) Wait() {
	w.wg.Wait()
}
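
// Illustrative usage sketch of boundedWaitGroup as a concurrency limiter. The job
// variables below are hypothetical and not part of this file; the only real API used
// is the one defined above.
//
//	bwg := newBoundedWaitGroup(4) // At most 4 goroutines run at once.
//	for _, job := range jobs {
//		bwg.Add(1) // Blocks while 4 jobs are already in flight.
//		go func(j func()) {
//			defer bwg.Done()
//			j()
//		}(job)
//	}
//	bwg.Wait() // Wait for the remaining jobs to finish.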

// SessionStore holds live sessions. Long polling sessions are stored in a linked list with
// the most recent sessions at the front. In addition, all sessions are stored in a map
// indexed by session ID.
type SessionStore struct {
	lock sync.Mutex

	// Support for long polling sessions: a list of sessions sorted by last access time.
	// Needed for cleaning up abandoned sessions.
	lru      *list.List
	lifeTime time.Duration

	// All sessions indexed by session ID.
	sessCache map[string]*Session
}

// NewSession creates a new session and saves it to the session store.
func (ss *SessionStore) NewSession(conn any, sid string) (*Session, int) {
	var s Session

	if sid == "" {
		s.sid = store.Store.GetUidString()
	} else {
		s.sid = sid
	}

	ss.lock.Lock()
	if _, found := ss.sessCache[s.sid]; found {
		logs.Err.Fatalln("ERROR! duplicate session ID", s.sid)
	}
	ss.lock.Unlock()

	switch c := conn.(type) {
	case *websocket.Conn:
		s.proto = WEBSOCK
		s.ws = c
	case http.ResponseWriter:
		s.proto = LPOLL
		// No need to store c for long polling: it changes with every request.
	case *ClusterNode:
		s.proto = MULTIPLEX
		s.clnode = c
	case pbx.Node_MessageLoopServer:
		s.proto = GRPC
		s.grpcnode = c
	default:
		logs.Err.Panicln("session: unknown connection type", conn)
	}

	s.subs = make(map[string]*Subscription)
	s.send = make(chan any, sendQueueLimit+32) // Buffered.
	s.stop = make(chan any, 1)                 // Buffered by 1 just to make it non-blocking.
	s.detach = make(chan string, 64)           // Buffered.

	s.bkgTimer = time.NewTimer(time.Hour)
	s.bkgTimer.Stop()

	// Make sure at most 1 request is modifying session/topic state at any time.
	// TODO: use Mutex & CondVar?
	s.inflightReqs = newBoundedWaitGroup(1)

	s.lastTouched = time.Now()

	ss.lock.Lock()

	if s.proto == LPOLL {
		// Only long polling sessions need to be sorted by last activity.
		s.lpTracker = ss.lru.PushFront(&s)
	}

	ss.sessCache[s.sid] = &s

	// Expire stale long polling sessions: ss.lru contains only long polling sessions.
	// If ss.lru is empty this is a noop.
	var expired []*Session
	expire := s.lastTouched.Add(-ss.lifeTime)
	for elem := ss.lru.Back(); elem != nil; elem = ss.lru.Back() {
		sess := elem.Value.(*Session)
		if sess.lastTouched.Before(expire) {
			ss.lru.Remove(elem)
			delete(ss.sessCache, sess.sid)
			expired = append(expired, sess)
		} else {
			break // No need to traverse further.
		}
	}

	numSessions := len(ss.sessCache)
	statsSet("LiveSessions", int64(numSessions))
	statsInc("TotalSessions", 1)
	ss.lock.Unlock()

	// Delete the expired long polling sessions.
	for _, sess := range expired {
		// cleanUp locks the session, so it must be called outside of the
		// SessionStore lock; otherwise it would deadlock.
		sess.cleanUp(true)
	}

	return &s, numSessions
}
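
// Illustrative registration sketch for NewSession. The connection handler context and
// the wsConn variable are assumptions, not taken from this file; only NewSession,
// sess.sid and logs are real.
//
//	sess, count := globals.sessionStore.NewSession(wsConn, "")
//	logs.Info.Println("new session", sess.sid, "- live sessions:", count)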

// Get fetches a session from store by session ID.
func (ss *SessionStore) Get(sid string) *Session {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	if sess := ss.sessCache[sid]; sess != nil {
		if sess.proto == LPOLL {
			ss.lru.MoveToFront(sess.lpTracker)
			sess.lastTouched = time.Now()
		}
		return sess
	}

	return nil
}

// Delete removes session from store.
func (ss *SessionStore) Delete(s *Session) {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	delete(ss.sessCache, s.sid)
	if s.proto == LPOLL {
		ss.lru.Remove(s.lpTracker)
	}

	statsSet("LiveSessions", int64(len(ss.sessCache)))
}

// Range calls the given function for all live sessions. Iteration stops if the function
// returns false. Note that f runs while the store lock is held, so it must not call back
// into SessionStore methods.
func (ss *SessionStore) Range(f func(sid string, s *Session) bool) {
	ss.lock.Lock()
	for sid, s := range ss.sessCache {
		if !f(sid, s) {
			break
		}
	}
	ss.lock.Unlock()
}
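
// Illustrative use of Range: counting the sessions attached to one user. The uid and
// globals.sessionStore references are assumptions for the sketch, not part of this file.
//
//	var n int
//	globals.sessionStore.Range(func(sid string, s *Session) bool {
//		if s.uid == uid {
//			n++
//		}
//		return true // Keep iterating.
//	})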

// Shutdown terminates the sessionStore. No cleanup is needed afterwards.
// The shutdown message is not sent to multiplexed (cluster) sessions because their
// servers are not being shut down.
func (ss *SessionStore) Shutdown() {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	shutdown := NoErrShutdown(types.TimeNow())
	for _, s := range ss.sessCache {
		if !s.isMultiplex() {
			_, data := s.serialize(shutdown)
			s.stopSession(data)
		}
	}

	// TODO: Consider broadcasting shutdown to other cluster nodes.

	logs.Info.Println("SessionStore shut down, sessions terminated:", len(ss.sessCache))
}

// EvictUser terminates all sessions of a given user, except the session with ID skipSid.
func (ss *SessionStore) EvictUser(uid types.Uid, skipSid string) {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	// FIXME: this probably needs to be optimized. It may take a very long time
	// if the node hosts 100000 sessions.
	evicted := NoErrEvicted("", "", types.TimeNow())
	evicted.AsUser = uid.UserId()
	for _, s := range ss.sessCache {
		if s.uid == uid && !s.isMultiplex() && s.sid != skipSid {
			_, data := s.serialize(evicted)
			s.stopSession(data)
			delete(ss.sessCache, s.sid)
			if s.proto == LPOLL {
				ss.lru.Remove(s.lpTracker)
			}
		}
	}

	statsSet("LiveSessions", int64(len(ss.sessCache)))
}
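
// Illustrative call: kick a user off every session on this node except the one that
// issued the request. The globals.sessionStore and curSess names are assumptions.
//
//	globals.sessionStore.EvictUser(uid, curSess.sid)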

// NodeRestarted removes stale sessions from a restarted cluster node.
//   - nodeName is the name of the affected node.
//   - fingerprint is the new fingerprint of the node.
func (ss *SessionStore) NodeRestarted(nodeName string, fingerprint int64) {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	for _, s := range ss.sessCache {
		if !s.isMultiplex() || s.clnode.name != nodeName {
			continue
		}
		if s.clnode.fingerprint != fingerprint {
			s.stopSession(nil)
			delete(ss.sessCache, s.sid)
		}
	}

	statsSet("LiveSessions", int64(len(ss.sessCache)))
}

// NewSessionStore initializes a session store.
func NewSessionStore(lifetime time.Duration) *SessionStore {
	ss := &SessionStore{
		lru:       list.New(),
		lifeTime:  lifetime,
		sessCache: make(map[string]*Session),
	}

	statsRegisterInt("LiveSessions")
	statsRegisterInt("TotalSessions")

	return ss
}
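
// Illustrative initialization at server startup. The 15-minute lifetime is a
// hypothetical value, not taken from this file; it bounds how long an idle long
// polling session survives before being expired by NewSession.
//
//	sessions := NewSessionStore(15 * time.Minute)
//	if s := sessions.Get("some-session-id"); s == nil {
//		// Unknown or already expired session.
//	}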