zopefoundation / ZODB, build 18153960591
01 Oct 2025 06:50AM UTC. Coverage: 83.781% (-0.03%) from 83.811%.

Pull Request #415: Apply the latest zope.meta templates
Commit: Update docs/articles/old-guide/convert_zodb_guide.py
Co-authored-by: Michael Howitz <icemac@gmx.net>

2441 of 3542 branches covered (68.92%)
193 of 257 new or added lines in 48 files covered (75.1%)
12 existing lines in 6 files now uncovered
13353 of 15938 relevant lines covered (83.78%)
0.84 hits per line

Source file: /src/ZODB/tests/testCache.py (96.69% covered)

##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A few simple tests of the public cache API.

Each DB Connection has a separate PickleCache.  The Cache serves two
purposes. It acts like a memo for unpickling.  It also keeps recent
objects in memory under the assumption that they may be used again.
"""

import doctest
import gc
import sys
import threading
import unittest

import transaction
from persistent import Persistent
from persistent import PickleCache
from persistent.mapping import PersistentMapping

import ZODB
import ZODB.MappingStorage
import ZODB.tests.util
from ZODB.tests.MinPO import MinPO
from ZODB.utils import p64
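

# A minimal sketch (not part of the test suite; the helper name is
# hypothetical and the function is never called) of the "memo" role
# described in the module docstring: within one connection, asking for
# an oid that is already cached returns the identical object rather
# than a fresh unpickled copy.  Only APIs exercised elsewhere in this
# file are used.
def _cache_memo_sketch():
    db = ZODB.DB(ZODB.MappingStorage.MappingStorage())
    conn = db.open()
    root = conn.root()
    # Connection.get() consults the connection's PickleCache first,
    # so the already-loaded root object itself comes back.
    assert conn.get(root._p_oid) is root
    db.close()
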
class CacheTestBase(ZODB.tests.util.TestCase):

    def setUp(self):
        ZODB.tests.util.TestCase.setUp(self)
        store = ZODB.MappingStorage.MappingStorage()
        self.db = ZODB.DB(store,
                          cache_size=self.CACHE_SIZE)
        self.conns = []

    def tearDown(self):
        self.db.close()
        ZODB.tests.util.TestCase.tearDown(self)

    CACHE_SIZE = 20

    def noodle_new_connection(self):
        """Do some reads and writes on a new connection."""

        c = self.db.open()
        self.conns.append(c)
        self.noodle_connection(c)

    def noodle_connection(self, c):
        r = c.root()

        i = len(self.conns)
        d = r.get(i)
        if d is None:
            d = r[i] = PersistentMapping()
            transaction.commit()

        for i in range(15):
            o = d.get(i)
            if o is None:
                o = d[i] = MinPO(i)
            o.value += 1
        transaction.commit()


# CantGetRidOfMe is used by testMinimizeTerminates.
make_trouble = True


class CantGetRidOfMe(MinPO):
    def __init__(self, value):
        MinPO.__init__(self, value)
        self.an_attribute = 42

    def __del__(self):
        # Referencing an attribute of self causes self to be
        # loaded into the cache again, which also resurrects
        # self.
        if make_trouble:
            self.an_attribute


class DBMethods(CacheTestBase):

    def setUp(self):
        CacheTestBase.setUp(self)
        for i in range(4):
            self.noodle_new_connection()

    def testCacheDetail(self):
        for name, count in self.db.cacheDetail():
            self.assertIsInstance(name, str)
            self.assertIsInstance(count, int)

    def testCacheExtremeDetail(self):
        expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
        for detail in self.db.cacheExtremeDetail():
            for k in detail:
                self.assertIn(k, expected)

    # TODO:  not really sure how to do a black box test of the cache.
    # Should the full sweep and minimize calls always remove things?

    def testFullSweep(self):
        old_size = self.db.cacheSize()
        self.db.cacheFullSweep()
        new_size = self.db.cacheSize()
        self.assertLess(new_size, old_size, f"{old_size} < {new_size}")

    def testMinimize(self):
        old_size = self.db.cacheSize()
        self.db.cacheMinimize()
        new_size = self.db.cacheSize()
        self.assertLess(new_size, old_size, f"{old_size} < {new_size}")

    def testMinimizeTerminates(self):
        # This is tricky.  cPickleCache had a case where it could get into
        # an infinite loop, but we don't want the test suite to hang
        # if this bug reappears.  So this test spawns a thread to run the
        # dangerous operation, and the main thread complains if the worker
        # thread hasn't finished in 30 seconds (arbitrary, but way more
        # than enough).  In that case, the worker thread will continue
        # running forever (until killed externally), but at least the
        # test suite will move on.
        #
        # The bug was triggered by having a persistent object whose __del__
        # method references an attribute of the object.  An attempt to
        # ghostify such an object will clear the attribute, and if the
        # cache thereby releases the last Python reference to the object
        # (due to ghostifying it), the __del__ method gets invoked.
        # Referencing the attribute loads the object again, and also
        # puts it back into the cPickleCache.  If the cache implementation
        # isn't looking out for this, it can get into an infinite loop
        # then, endlessly trying to ghostify an object that in turn keeps
        # unghostifying itself again.

        # This test uses threads, so we can't use the default
        # transaction manager.
        for conn in self.conns:
            conn.close()
        self.conns[0] = self.db.open(transaction.TransactionManager())

        class Worker(threading.Thread):

            def __init__(self, testcase):
                threading.Thread.__init__(self)
                self.testcase = testcase

            def run(self):
                global make_trouble
                # Make CantGetRidOfMe.__del__ dangerous.
                make_trouble = True

                conn = self.testcase.conns[0]
                r = conn.root()
                d = r[1]
                for i in range(len(d)):
                    d[i] = CantGetRidOfMe(i)
                conn.transaction_manager.commit()

                self.testcase.db.cacheMinimize()

                # Defang the nasty objects.  Else, because they're
                # immortal now, they hang around and create trouble
                # for subsequent tests.
                make_trouble = False
                self.testcase.db.cacheMinimize()

        w = Worker(self)
        w.start()
        w.join(30)
        if w.is_alive():
            self.fail("cacheMinimize still running after 30 seconds -- "
                      "almost certainly in an infinite loop")

    # TODO:  there is no explicit test for incrgc, because the
    # connection and database call it internally.
    # Same for the get and invalidate methods.

    def testLRUitems(self):
        # get a cache
        c = self.conns[0]._cache
        c.lru_items()

    def testClassItems(self):
        c = self.conns[0]._cache
        c.klass_items()


class LRUCacheTests(CacheTestBase):

    def testLRU(self):
        # verify the LRU behavior of the cache
        dataset_size = 5
        CACHE_SIZE = dataset_size * 2 + 1
        # a cache big enough to hold the objects added in two
        # transactions, plus the root object
        self.db.setCacheSize(CACHE_SIZE)
        c = self.db.open()
        r = c.root()
        l_ = {}
        # the root is the only thing in the cache, because all the
        # other objects are new
        self.assertEqual(len(c._cache), 1)
        # run several transactions
        for t in range(5):
            for i in range(dataset_size):
                l_[(t, i)] = r[i] = MinPO(i)
            transaction.commit()
            # commit() will register the objects, placing them in the
            # cache.  at the end of commit, the cache will be reduced
            # down to CACHE_SIZE items
            if len(l_) > CACHE_SIZE:
                self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
        for i in range(dataset_size):
            # Check objects added in the first two transactions.
            # They must all be ghostified.
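            # (In the persistent API, _p_changed is None for a ghost,
            # false for a saved, up-to-date object, and true for a
            # modified one; the assertions below rely on that mapping.)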
            self.assertEqual(l_[(0, i)]._p_changed, None)
            self.assertEqual(l_[(1, i)]._p_changed, None)
            # Check objects added in the last two transactions.
            # They must all still exist in memory, but have
            # had their changes flushed
            self.assertEqual(l_[(3, i)]._p_changed, 0)
            self.assertEqual(l_[(4, i)]._p_changed, 0)
            # Of the objects added in the middle transaction, most
            # will have been ghostified. There is one cache slot
            # that may be occupied by either one of those objects or
            # the root, depending on precise order of access. We do
            # not bother to check this.

    def testSize(self):
        self.db.cacheMinimize()
        self.assertEqual(self.db.cacheSize(), 0)

        CACHE_SIZE = 10
        self.db.setCacheSize(CACHE_SIZE)

        CONNS = 3
        for i in range(CONNS):
            self.noodle_new_connection()

        self.assertEqual(self.db.cacheSize(), CACHE_SIZE * CONNS)
        details = self.db.cacheDetailSize()
        self.assertEqual(len(details), CONNS)
        for d in details:
            self.assertEqual(d['ngsize'], CACHE_SIZE)

            # The assertion below is nonsensical: the (poorly named)
            # cache size is a target for the number of non-ghosts, and
            # the cache usually also contains ghosts, so the total size
            # normally exceeds that target.

            # self.assertEqual(d['size'], CACHE_SIZE)
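
            # (By definition 'size' counts ghosts and non-ghosts while
            # 'ngsize' counts only non-ghosts, so a relation that would
            # hold, though the suite does not assert it, is:
            # self.assertGreaterEqual(d['size'], d['ngsize'])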

    def testDetail(self):
        CACHE_SIZE = 10
        self.db.setCacheSize(CACHE_SIZE)

        CONNS = 3
        for i in range(CONNS):
            self.noodle_new_connection()

        gc.collect()

        # Obscure:  The above gc.collect call is necessary to make this
        # test pass.
        #
        # This test then only works because the order of computations
        # and object accesses in the "noodle" calls is such that the
        # persistent mapping containing the MinPO objects is
        # deactivated before the MinPO objects.
        #
        # - Without the gc call, the cache will contain ghost MinPOs
        #   and the check of the MinPO count below will fail. That's
        #   because the counts returned by cacheDetail include ghosts.
        #
        # - If the mapping object containing the MinPOs isn't
        #   deactivated, there will be one fewer non-ghost MinPO and
        #   the test will fail anyway.
        #
        # This test really needs to be thought through and documented
        # better.

        for klass, count in self.db.cacheDetail():
            if klass.endswith('MinPO'):
                self.assertEqual(count, CONNS * CACHE_SIZE)
            if klass.endswith('PersistentMapping'):
                # one root per connection
                self.assertEqual(count, CONNS)

        for details in self.db.cacheExtremeDetail():
            # one 'details' dict per object
            if details['klass'].endswith('PersistentMapping'):
                self.assertEqual(details['state'], None)
            else:
                self.assertTrue(details['klass'].endswith('MinPO'))
                self.assertEqual(details['state'], 0)
            # The cache should never hold an unreferenced ghost.
            if details['state'] is None:    # i.e., it's a ghost
                self.assertGreater(details['rc'], 0)


class StubDataManager:
    def setklassstate(self, object):
        pass


class StubObject(Persistent):
    pass


class CacheErrors(unittest.TestCase):

    def setUp(self):
        self.jar = StubDataManager()
        self.cache = PickleCache(self.jar)

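    # (Cache keys are oids: 8-byte strings such as those produced by
    # ZODB.utils.p64.  The integer key 12 used below is deliberately
    # not a valid key.)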
    def testGetBogusKey(self):
        self.assertEqual(self.cache.get(p64(0)), None)
        try:
            self.cache[12]
        except KeyError:
            pass
        else:
            self.fail("expected KeyError")
        try:
            self.cache[12] = 12
        except TypeError:
            pass
        else:
            self.fail("expected TypeError")
        try:
            del self.cache[12]
        except TypeError:
            pass
        else:
            self.fail("expected TypeError")

    def testBogusObject(self):
        def add(key, obj):
            self.cache[key] = obj

        # getrefcount is an implementation detail of CPython,
        # not present under PyPy/Jython
        rc = getattr(sys, 'getrefcount', lambda x: 1)
        nones = rc(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        if sys.gettrace() is None:
            # 'coverage' keeps track of coverage information in a data
            # structure that adds a new reference to None for each executed
            # line of code, which interferes with this test.  So check it
            # only if we're running without coverage tracing.

            # On Python 3.7, we can see the value of reference counts
            # to None actually go *down* by a few. Possibly it has to
            # do with the lazy tracking of frames?
            # (https://github.com/python/cpython/commit/5a625d0aa6a6d9ec6574ee8344b41d63dcb9897e)
            #
            # Likewise, on 3.8 with PURE_PYTHON it sometimes increases
            # by 1; this is cleared up by a garbage collection (it's
            # not clear where/why)
            new_nones = rc(None)
            if new_nones > nones:
                gc.collect()
            self.assertLessEqual(rc(None), nones)

    def testTwoCaches(self):
        jar2 = StubDataManager()
        cache2 = PickleCache(jar2)

        o = StubObject()
        key = o._p_oid = p64(1)
        o._p_jar = jar2

        cache2[key] = o

        try:
            self.cache[key] = o
        except ValueError:
            pass
        else:
            self.fail("expected ValueError because object already in cache")

    def testReadOnlyAttrsWhenCached(self):
        o = StubObject()
        key = o._p_oid = p64(1)
        o._p_jar = self.jar
        self.cache[key] = o
        try:
            o._p_oid = p64(2)
        except ValueError:
            pass
        else:
            self.fail("expect that you can't change oid of cached object")
        try:
            del o._p_jar
        except ValueError:
            pass
        else:
            self.fail("expect that you can't delete jar of cached object")

    def testTwoObjsSameOid(self):
        # Try to add two distinct objects with the same oid to the cache.
        # This has always been an error, but the error message prior to
        # ZODB 3.2.6 didn't make sense.  This test verifies that (a) an
        # exception is raised; and, (b) the error message is the intended
        # one.
        obj1 = StubObject()
        key = obj1._p_oid = p64(1)
        obj1._p_jar = self.jar
        self.cache[key] = obj1

        obj2 = StubObject()
        obj2._p_oid = key
        obj2._p_jar = self.jar
        try:
            self.cache[key] = obj2
        except ValueError as detail:
            self.assertEqual(str(detail),
                             "A different object already has the same oid")
        else:
            self.fail("two objects with the same oid should have failed")


def test_basic_cache_size_estimation():
    """Make sure the basic accounting is correct:

    >>> import ZODB.MappingStorage
    >>> db = ZODB.MappingStorage.DB()
    >>> conn = db.open()
    >>> conn.cacheMinimize(); _ = gc.collect() # See fix84.rst

    >>> def check_cache_size(cache, expected):
    ...     actual = cache.total_estimated_size
    ...     if actual != expected:
    ...         print("expected %d, got %d" % (expected, actual))
    ...         print("objects in cache:")
    ...         for oid, obj in sorted(cache.items()):
    ...             print(repr(oid), " - ", obj._p_estimated_size, "bytes")


The cache is empty initially:

    >>> check_cache_size(conn._cache, 0)

We force the root to be loaded and the cache grows:

    >>> getattr(conn.root, 'z', None)
    >>> root_size = conn.root._root._p_estimated_size
    >>> check_cache_size(conn._cache, root_size)

We need to unwrap the RootConvenience to get to the actual persistent
mapping that is our root object and see its estimated size:

    >>> root_size in (64, 128)
    True

.. note::

    The actual size is 60 (Python 2.6 using cPickle; would be 62 if we
    used pickle) or 65 bytes (Python 3.3) due to slight differences in
    the pickle bytecode that is used.  You can play with ::

        pickletools.dis(conn._storage.load(conn.root._root._p_oid)[0])

    to see the differences in the first pickle (encoding the object
    class), and ::

        pickletools.dis(conn._storage.load(conn.root._root._p_oid)[0][N:])

    to see the differences in the second pickle (encoding the object
    state; here N is the length of the first pickle).

    These sizes are then rounded up to a multiple of 64 so that they fit
    in a 24-bit field, for obscure reasons having to do with C structure
    size and BBB (backward compatibility) with evil packages shipping
    their own copies of cPersistence.h.
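
    (A quick check of the arithmetic, using only the rounding rule just
    stated: rounding up to a multiple of 64, e.g. ``((n + 63) // 64) * 64``,
    maps 60 and 62 to 64 and maps 65 to 128, which is why ``root_size in
    (64, 128)`` is accepted above.)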

We add some data and the cache grows:

    >>> conn.root.z = ZODB.tests.util.P('x'*100)
    >>> import transaction
    >>> transaction.commit()
    >>> root_size = conn.root._root._p_estimated_size
    >>> z_size = conn.root.z._p_estimated_size
    >>> check_cache_size(conn._cache, root_size + z_size)

Note that the size of the root object increased also, so we need to take
a new measurement:

    >>> root_size in (128, 192)
    True
    >>> z_size
    192

Loading the objects in another connection gets the same sizes:

    >>> conn2 = db.open()
    >>> check_cache_size(conn2._cache, 0)
    >>> getattr(conn2.root, 'x', None)
    >>> check_cache_size(conn2._cache, root_size)
    >>> _ = conn2.root.z.name
    >>> check_cache_size(conn2._cache, root_size + z_size)

If we deactivate, the size goes down:

    >>> conn2.root.z._p_deactivate()
    >>> check_cache_size(conn2._cache, root_size)

Loading data directly, rather than through traversal, updates the cache
size correctly:

    >>> conn3 = db.open()
    >>> _ = conn3.get(conn2.root.z._p_oid).name
    >>> check_cache_size(conn3._cache, z_size)

    """


def test_suite():
    s = unittest.defaultTestLoader.loadTestsFromTestCase(DBMethods)
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(LRUCacheTests))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(CacheErrors))
    s.addTest(doctest.DocTestSuite())
    return s
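

# Note: the doctest.DocTestSuite() call above is what collects the
# module-level doctest test_basic_cache_size_estimation; a purely
# class-based test loader would not pick it up.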