Coveralls logo
Coveralls logo
  • Home
  • Features
  • Pricing
  • Docs
  • Sign In

matrix-org / synapse / 4532

23 Sep 2019 - 19:39 coverage decreased (-49.7%) to 17.596%
4532

Pull #6079

buildkite

Richard van der Hoff
update changelog
Pull Request #6079: Add submit_url response parameter to msisdn /requestToken

359 of 12986 branches covered (2.76%)

Branch coverage included in aggregate %.

0 of 7 new or added lines in 1 file covered. (0.0%)

18869 existing lines in 281 files now uncovered.

8809 of 39116 relevant lines covered (22.52%)

0.23 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

27.67
/synapse/util/caches/lrucache.py
1
# -*- coding: utf-8 -*-
2
# Copyright 2015, 2016 OpenMarket Ltd
3
#
4
# Licensed under the Apache License, Version 2.0 (the "License");
5
# you may not use this file except in compliance with the License.
6
# You may obtain a copy of the License at
7
#
8
#     http://www.apache.org/licenses/LICENSE-2.0
9
#
10
# Unless required by applicable law or agreed to in writing, software
11
# distributed under the License is distributed on an "AS IS" BASIS,
12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
# See the License for the specific language governing permissions and
14
# limitations under the License.
15

16

17
import threading
1×
18
from functools import wraps
1×
19

20
from synapse.util.caches.treecache import TreeCache
1×
21

22

23
def enumerate_leaves(node, depth):
    """Yield every leaf exactly *depth* levels below *node*.

    With depth == 0 the node itself is the leaf. Otherwise node is taken
    to be a dict-like container and we recurse into each of its values,
    one level at a time.
    """
    if depth == 0:
        yield node
        return

    for child in node.values():
        for leaf in enumerate_leaves(child, depth - 1):
            yield leaf
!
30

31

32
class _Node(object):
1×
33
    __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"]
1×
34

35
    def __init__(self, prev_node, next_node, key, value, callbacks=set()):
1×
36
        self.prev_node = prev_node
1×
37
        self.next_node = next_node
1×
38
        self.key = key
1×
39
        self.value = value
1×
40
        self.callbacks = callbacks
1×
41

42

43
class LruCache(object):
    """
    Least-recently-used cache.
    Supports del_multi only if cache_type=TreeCache
    If cache_type=TreeCache, all keys must be tuples.

    Can also set callbacks on objects when getting/setting which are fired
    when that key gets invalidated/evicted.
    """

    def __init__(
        self,
        max_size,
        keylen=1,
        cache_type=dict,
        size_callback=None,
        evicted_callback=None,
    ):
        """
        Args:
            max_size (int): maximum cache "length" (entry count, or total
                size as computed by size_callback) before eviction kicks in.

            keylen (int): length of the key tuples when cache_type is
                TreeCache; used by del_multi to know how deep the leaves sit.

            cache_type (type):
                type of underlying cache to be used. Typically one of dict
                or TreeCache.

            size_callback (func(V) -> int | None):
                if not None, the cache is bounded by the sum of
                size_callback(value) over all entries rather than by count.

            evicted_callback (func(int)|None):
                if not None, called on eviction with the size of the evicted
                entry
        """
        cache = cache_type()
        self.cache = cache  # Used for introspection.

        # Doubly-linked recency list, most-recently-used first. The root is
        # a sentinel node that links to itself while the list is empty.
        list_root = _Node(None, None, None, None)
        list_root.next_node = list_root
        list_root.prev_node = list_root

        lock = threading.Lock()

        def evict():
            # Drop least-recently-used entries until back under max_size.
            while cache_len() > max_size:
                todelete = list_root.prev_node
                evicted_len = delete_node(todelete)
                cache.pop(todelete.key, None)
                if evicted_callback:
                    evicted_callback(evicted_len)

        def synchronized(f):
            # Every public cache operation runs under the one cache-wide lock.
            @wraps(f)
            def inner(*args, **kwargs):
                with lock:
                    return f(*args, **kwargs)

            return inner

        # Single-element list so the closures below can rebind the count.
        cached_cache_len = [0]
        if size_callback is not None:
            # Maintain the aggregate size incrementally: re-summing on every
            # lookup would be O(n).
            def cache_len():
                return cached_cache_len[0]

        else:

            def cache_len():
                return len(cache)

        def add_node(key, value, callbacks=None):
            # NB: None sentinel, not a mutable `set()` default — a default
            # set would be shared by every node added without callbacks
            # (e.g. via cache_set_default), and later update()/clear() calls
            # on one node would corrupt all of them.
            prev_node = list_root
            next_node = prev_node.next_node
            node = _Node(
                prev_node,
                next_node,
                key,
                value,
                callbacks if callbacks is not None else set(),
            )
            prev_node.next_node = node
            next_node.prev_node = node
            cache[key] = node

            if size_callback:
                cached_cache_len[0] += size_callback(node.value)

        def move_node_to_front(node):
            # Unlink from the current position...
            prev_node = node.prev_node
            next_node = node.next_node
            prev_node.next_node = next_node
            next_node.prev_node = prev_node
            # ...and relink immediately after the root (the MRU slot).
            prev_node = list_root
            next_node = prev_node.next_node
            node.prev_node = prev_node
            node.next_node = next_node
            prev_node.next_node = node
            next_node.prev_node = node

        def delete_node(node):
            """Unlink node, fire its callbacks, return its size."""
            prev_node = node.prev_node
            next_node = node.next_node
            prev_node.next_node = next_node
            next_node.prev_node = prev_node

            deleted_len = 1
            if size_callback:
                deleted_len = size_callback(node.value)
                cached_cache_len[0] -= deleted_len

            # Fire invalidation callbacks exactly once, then drop them.
            for cb in node.callbacks:
                cb()
            node.callbacks.clear()
            return deleted_len

        @synchronized
        def cache_get(key, default=None, callbacks=()):
            # `callbacks` defaults to an immutable tuple, not a shared list.
            node = cache.get(key, None)
            if node is not None:
                move_node_to_front(node)
                node.callbacks.update(callbacks)
                return node.value
            else:
                return default

        @synchronized
        def cache_set(key, value, callbacks=()):
            node = cache.get(key, None)
            if node is not None:
                # We sometimes store large objects, e.g. dicts, which cause
                # the inequality check to take a long time. So let's only do
                # the check if we have some callbacks to call.
                if node.callbacks and value != node.value:
                    for cb in node.callbacks:
                        cb()
                    node.callbacks.clear()

                # We don't bother to protect this by value != node.value as
                # generally size_callback will be cheap compared with equality
                # checks. (For example, taking the size of two dicts is quicker
                # than comparing them for equality.)
                if size_callback:
                    cached_cache_len[0] -= size_callback(node.value)
                    cached_cache_len[0] += size_callback(value)

                node.callbacks.update(callbacks)

                move_node_to_front(node)
                node.value = value
            else:
                add_node(key, value, set(callbacks))

            evict()

        @synchronized
        def cache_set_default(key, value):
            node = cache.get(key, None)
            if node is not None:
                return node.value
            else:
                add_node(key, value)
                evict()
                return value

        @synchronized
        def cache_pop(key, default=None):
            node = cache.get(key, None)
            if node:
                delete_node(node)
                cache.pop(node.key, None)
                return node.value
            else:
                return default

        @synchronized
        def cache_del_multi(key):
            """
            This will only work if constructed with cache_type=TreeCache
            """
            popped = cache.pop(key)
            if popped is None:
                return
            # The popped subtree's leaves are the _Node entries; unlink each
            # from the recency list (which also fires their callbacks).
            for leaf in enumerate_leaves(popped, keylen - len(key)):
                delete_node(leaf)

        @synchronized
        def cache_clear():
            list_root.next_node = list_root
            list_root.prev_node = list_root
            for node in cache.values():
                for cb in node.callbacks:
                    cb()
            cache.clear()
            if size_callback:
                cached_cache_len[0] = 0

        @synchronized
        def cache_contains(key):
            return key in cache

        self.sentinel = object()
        self.get = cache_get
        self.set = cache_set
        self.setdefault = cache_set_default
        self.pop = cache_pop
        if cache_type is TreeCache:
            self.del_multi = cache_del_multi
        self.len = synchronized(cache_len)
        self.contains = cache_contains
        self.clear = cache_clear

    def __getitem__(self, key):
        result = self.get(key, self.sentinel)
        if result is self.sentinel:
            raise KeyError()
        else:
            return result

    def __setitem__(self, key, value):
        self.set(key, value)

    def __delitem__(self, key, value=None):
        # `value` is unused; it keeps backwards compatibility with any
        # direct caller while giving it a default so that `del cache[key]`
        # (which passes only the key) no longer raises TypeError.
        result = self.pop(key, self.sentinel)
        if result is self.sentinel:
            raise KeyError()

    def __len__(self):
        return self.len()

    def __contains__(self, key):
        return self.contains(key)
!
Troubleshooting · Open an Issue · Sales · Support · ENTERPRISE · CAREERS · STATUS
BLOG · TWITTER · Legal & Privacy · Supported CI Services · What's a CI service? · Automated Testing

© 2019 Coveralls, LLC