forked from piskvorky/sqlitedict
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsqlitedict.py
executable file
·364 lines (300 loc) · 12.1 KB
/
sqlitedict.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Hacked together from:
# * http://code.activestate.com/recipes/576638-draft-for-an-sqlite3-based-dbm/
# * http://code.activestate.com/recipes/526618/
#
# Use the code in any way you like (at your own risk), it's public domain.
"""
A lightweight wrapper around Python's sqlite3 database, with a dict-like interface
and multi-thread access support::
>>> mydict = SqliteDict('some.db', autocommit=True) # the mapping will be persisted to file `some.db`
>>> mydict['some_key'] = any_picklable_object
>>> print mydict['some_key']
>>> print len(mydict) # etc... all dict functions work
Pickle is used internally to serialize the values. Keys are strings.
If you don't use autocommit (default is no autocommit for performance), then
don't forget to call `mydict.commit()` when done with a transaction.
"""
import sqlite3
import os
import tempfile
import random
import logging
from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL
from UserDict import DictMixin
from Queue import Queue
from threading import Thread
logger = logging.getLogger('sqlitedict')
def open(*args, **kwargs):
    """Factory helper: forward all arguments to the `SqliteDict` constructor.

    NOTE: this deliberately shadows the builtin `open` inside this module's
    public API, mirroring the stdlib `anydbm.open` convention.
    """
    return SqliteDict(*args, **kwargs)
def encode(obj):
    """Pickle `obj` and wrap the result so sqlite stores it as a BLOB."""
    pickled = dumps(obj, protocol=PICKLE_PROTOCOL)
    return sqlite3.Binary(pickled)
def decode(obj):
    """Deserialize a value previously stored by `encode`.

    `obj` arrives from sqlite as a buffer-like object (buffer under Python 2,
    memoryview/bytes under Python 3).  Normalize it with `bytes()` before
    unpickling: under Python 2 `bytes is str`, so this is identical to the old
    `str(obj)` call, but unlike `str()` it also yields the raw payload (not a
    repr) for memoryview objects, keeping the helper forward-compatible.
    """
    return loads(bytes(obj))
class SqliteDict(object, DictMixin):
def __init__(self, filename=None, tablename='unnamed', flag='c',
autocommit=False, journal_mode="DELETE"):
"""
Initialize a thread-safe sqlite-backed dictionary. The dictionary will
be a table `tablename` in database file `filename`. A single file (=database)
may contain multiple tables.
If no `filename` is given, a random file in temp will be used (and deleted
from temp once the dict is closed/deleted).
If you enable `autocommit`, changes will be committed after each operation
(more inefficient but safer). Otherwise, changes are committed on `self.commit()`,
`self.clear()` and `self.close()`.
Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems
or if you need performance and don't care about crash-consistency.
The `flag` parameter:
'c': default mode, open for read/write, creating the db/table if necessary.
'w': open for r/w, but drop `tablename` contents first (start with empty table)
'n': create a new database (erasing any existing tables, not just `tablename`!).
"""
self.in_temp = filename is None
if self.in_temp:
randpart = hex(random.randint(0, 0xffffff))[2:]
filename = os.path.join(tempfile.gettempdir(), 'sqldict' + randpart)
if flag == 'n':
if os.path.exists(filename):
os.remove(filename)
self.filename = filename
self.tablename = tablename
logger.info("opening Sqlite table %r in %s" % (tablename, filename))
MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS %s (key TEXT PRIMARY KEY, value BLOB)' % self.tablename
self.conn = SqliteMultithread(filename, autocommit=autocommit, journal_mode=journal_mode)
self.conn.execute(MAKE_TABLE)
self.conn.commit()
if flag == 'w':
self.clear()
def __str__(self):
# return "SqliteDict(%i items in %s)" % (len(self), self.conn.filename)
return "SqliteDict(%s)" % (self.conn.filename)
def __len__(self):
# `select count (*)` is super slow in sqlite (does a linear scan!!)
# As a result, len() is very slow too once the table size grows beyond trivial.
# We could keep the total count of rows ourselves, by means of triggers,
# but that seems too complicated and would slow down normal operation
# (insert/delete etc).
GET_LEN = 'SELECT COUNT(*) FROM %s' % self.tablename
rows = self.conn.select_one(GET_LEN)[0]
return rows if rows is not None else 0
def __bool__(self):
GET_LEN = 'SELECT MAX(ROWID) FROM %s' % self.tablename
return self.conn.select_one(GET_LEN) is not None
def iterkeys(self):
GET_KEYS = 'SELECT key FROM %s ORDER BY rowid' % self.tablename
for key in self.conn.select(GET_KEYS):
yield key[0]
def itervalues(self):
GET_VALUES = 'SELECT value FROM %s ORDER BY rowid' % self.tablename
for value in self.conn.select(GET_VALUES):
yield decode(value[0])
def iteritems(self):
GET_ITEMS = 'SELECT key, value FROM %s ORDER BY rowid' % self.tablename
for key, value in self.conn.select(GET_ITEMS):
yield key, decode(value)
def __contains__(self, key):
HAS_ITEM = 'SELECT 1 FROM %s WHERE key = ?' % self.tablename
return self.conn.select_one(HAS_ITEM, (key,)) is not None
def __getitem__(self, key):
GET_ITEM = 'SELECT value FROM %s WHERE key = ?' % self.tablename
item = self.conn.select_one(GET_ITEM, (key,))
if item is None:
raise KeyError(key)
return decode(item[0])
def __setitem__(self, key, value):
ADD_ITEM = 'REPLACE INTO %s (key, value) VALUES (?,?)' % self.tablename
self.conn.execute(ADD_ITEM, (key, encode(value)))
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
DEL_ITEM = 'DELETE FROM %s WHERE key = ?' % self.tablename
self.conn.execute(DEL_ITEM, (key,))
def update(self, items=(), **kwds):
try:
items = [(k, encode(v)) for k, v in items.iteritems()]
except AttributeError:
pass
UPDATE_ITEMS = 'REPLACE INTO %s (key, value) VALUES (?, ?)' % self.tablename
self.conn.executemany(UPDATE_ITEMS, items)
if kwds:
self.update(kwds)
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def __iter__(self):
return self.iterkeys()
def clear(self):
CLEAR_ALL = 'DELETE FROM %s;' % self.tablename # avoid VACUUM, as it gives "OperationalError: database schema has changed"
self.conn.commit()
self.conn.execute(CLEAR_ALL)
self.conn.commit()
def commit(self):
if self.conn is not None:
self.conn.commit()
sync = commit
def close(self):
logger.debug("closing %s" % self)
if self.conn is not None:
if self.conn.autocommit:
self.conn.commit()
self.conn.close()
self.conn = None
if self.in_temp:
try:
os.remove(self.filename)
except:
pass
def terminate(self):
"""Delete the underlying database file. Use with care."""
self.close()
logger.info("deleting %s" % self.filename)
try:
os.remove(self.filename)
except IOError, e:
logger.warning("failed to delete %s: %s" % (self.filename, e))
def __del__(self):
# like close(), but assume globals are gone by now (such as the logger)
try:
if self.conn is not None:
if self.conn.autocommit:
self.conn.commit()
self.conn.close()
self.conn = None
if self.in_temp:
os.remove(self.filename)
except:
pass
#endclass SqliteDict
class SqliteMultithread(Thread):
    """
    Wrap sqlite connection in a way that allows concurrent requests from multiple threads.
    This is done by internally queueing the requests and processing them sequentially
    in a separate thread (in the same order they arrived).

    Protocol: callers push (sql, params, optional-result-queue) tuples onto
    `self.reqs`; the worker thread created here owns the one and only sqlite
    connection and services requests FIFO.  The strings '--commit--' and
    '--close--' are in-band control sentinels understood by run().
    """
    def __init__(self, filename, autocommit, journal_mode):
        # Connection parameters are only stored here; the actual sqlite
        # connection is created inside run(), i.e. on the worker thread.
        super(SqliteMultithread, self).__init__()
        self.filename = filename
        self.autocommit = autocommit
        self.journal_mode = journal_mode
        self.reqs = Queue() # use request queue of unlimited size
        self.setDaemon(True) # python2.5-compatible
        self.start()
    def run(self):
        """Worker loop: serve queued requests on a private connection until '--close--'."""
        if self.autocommit:
            # isolation_level=None puts the sqlite connection itself into
            # autocommit mode (no implicit transactions).
            conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False)
        else:
            conn = sqlite3.connect(self.filename, check_same_thread=False)
        conn.execute('PRAGMA journal_mode = %s' % self.journal_mode)
        conn.text_factory = str
        cursor = conn.cursor()
        cursor.execute('PRAGMA synchronous=OFF')
        while True:
            # each request is a (sql, params, result-queue-or-None) triple
            req, arg, res = self.reqs.get()
            if req == '--close--':
                break
            elif req == '--commit--':
                conn.commit()
            else:
                cursor.execute(req, arg)
                if res:
                    # stream all rows to the caller, then signal end-of-results
                    for rec in cursor:
                        res.put(rec)
                    res.put('--no more--')
                if self.autocommit:
                    # commit after every statement when autocommit was requested
                    conn.commit()
        conn.close()
    def execute(self, req, arg=None, res=None):
        """
        `execute` calls are non-blocking: just queue up the request and return immediately.

        `res`, if given, must be a Queue the worker will fill with result rows
        followed by the '--no more--' sentinel (see select()).
        """
        self.reqs.put((req, arg or tuple(), res))
    def executemany(self, req, items):
        # queue one request per item; FIFO ordering of the queue preserves order
        for item in items:
            self.execute(req, item)
    def select(self, req, arg=None):
        """
        Unlike sqlite's native select, this select doesn't handle iteration efficiently.
        The result of `select` starts filling up with values as soon as the
        request is dequeued, and although you can iterate over the result normally
        (`for res in self.select(): ...`), the entire result will be in memory.
        """
        res = Queue() # results of the select will appear as items in this queue
        self.execute(req, arg, res)
        while True:
            rec = res.get() # blocks until the worker produces the next row
            if rec == '--no more--':
                break
            yield rec
    def select_one(self, req, arg=None):
        """Return only the first row of the SELECT, or None if there are no matching rows."""
        # The abandoned generator (and its result queue) is simply left for GC;
        # the worker has already pushed the full result set into the queue.
        try:
            return iter(self.select(req, arg)).next()
        except StopIteration:
            return None
    def commit(self):
        # in-band sentinel: run() translates this into conn.commit()
        self.execute('--commit--')
    def close(self):
        # in-band sentinel: run() breaks out of its loop and closes the connection
        self.execute('--close--')
#endclass SqliteMultithread
# running sqlitedict.py as script will perform a simple unit test
if __name__ in '__main___':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(module)s:%(lineno)d : %(funcName)s(%(threadName)s) : %(message)s')
logging.root.setLevel(level=logging.INFO)
for d in SqliteDict(), SqliteDict('example', flag='n'):
assert list(d) == []
assert len(d) == 0
assert not d
d['abc'] = 'rsvp' * 100
assert d['abc'] == 'rsvp' * 100
assert len(d) == 1
d['abc'] = 'lmno'
assert d['abc'] == 'lmno'
assert len(d) == 1
del d['abc']
assert not d
assert len(d) == 0
d['abc'] = 'lmno'
d['xyz'] = 'pdq'
assert len(d) == 2
assert list(d.iteritems()) == [('abc', 'lmno'), ('xyz', 'pdq')]
assert d.items() == [('abc', 'lmno'), ('xyz', 'pdq')]
assert d.values() == ['lmno', 'pdq']
assert d.keys() == ['abc', 'xyz']
assert list(d) == ['abc', 'xyz']
d.update(p='x', q='y', r='z')
assert len(d) == 5
assert d.items() == [('abc', 'lmno'), ('xyz', 'pdq'), ('q', 'y'), ('p', 'x'), ('r', 'z')]
del d['abc']
try:
error = d['abc']
except KeyError:
pass
else:
assert False
try:
del d['abc']
except KeyError:
pass
else:
assert False
assert list(d) == ['xyz', 'q', 'p', 'r']
assert d
d.clear()
assert not d
assert list(d) == []
d.update(p='x', q='y', r='z')
assert list(d) == ['q', 'p', 'r']
d.clear()
assert not d
d.close()
print 'all tests passed :-)'