mirror of https://github.com/nucypher/nucypher.git
Make update_stats non-public to indicate that it should not be used directly; really it should only be called by a `NodeLatencyContextManager`.
parent c94da3e9d8
commit bc8f8f1b84
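For context, the call pattern the commit message has in mind looks roughly like this minimal sketch: callers never invoke `_update_stats` themselves; they enter a `NodeLatencyContextManager`, which times the wrapped block and reports the elapsed time to the collector on a clean exit. Only the names that also appear in the diff below (`_update_stats`, `NodeLatencyContextManager`, `start_time`, `staker_address`, `_stats_collector`) come from the source; the rest of the wiring is an illustrative assumption.

    import time

    class NodeLatencyContextManager:
        # Sketch only: ties one timed operation to one staker's stats.
        def __init__(self, stats_collector, staker_address):
            self._stats_collector = stats_collector
            self.staker_address = staker_address

        def __enter__(self):
            # start the clock when the guarded operation begins
            self.start_time = time.perf_counter()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is None:
                # no exception: record how long the operation took
                end_time = time.perf_counter()
                execution_time = end_time - self.start_time
                self._stats_collector._update_stats(self.staker_address, execution_time)

The leading underscore is Python's convention for internal API: nothing is enforced at runtime, but it signals that measurements should enter the collector only through the context manager's timing path.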
@@ -36,7 +36,7 @@ class NodeLatencyStatsCollector:
                 # no exception
                 end_time = time.perf_counter()
                 execution_time = end_time - self.start_time
-                self._stats_collector.update_stats(self.staker_address, execution_time)
+                self._stats_collector._update_stats(self.staker_address, execution_time)
 
     def __init__(self):
         # staker_address -> { "total_time": <float>, "count": <integer> }
@@ -45,7 +45,7 @@ class NodeLatencyStatsCollector:
         )
         self._lock = Lock()
 
-    def update_stats(self, staking_address: ChecksumAddress, latest_time_taken: float):
+    def _update_stats(self, staking_address: ChecksumAddress, latest_time_taken: float):
         with self._lock:
             old_avg = self._node_stats[staking_address][self.CURRENT_AVERAGE]
             old_count = self._node_stats[staking_address][self.COUNT]
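The hunk above ends just before the arithmetic, but the two reads make the update's shape clear: `_update_stats` folds each new sample into a running average under the lock instead of storing every measurement. A hedged reconstruction of the lines that fall outside the hunk (an assumption consistent with the fields read above, not code copied from the repository):

    # incremental mean: weight the old average by its sample count,
    # add the new sample, and divide by the new count
    updated_count = old_count + 1
    updated_avg = ((old_avg * old_count) + latest_time_taken) / updated_count
    self._node_stats[staking_address][self.CURRENT_AVERAGE] = updated_avg
    self._node_stats[staking_address][self.COUNT] = updated_count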
@@ -305,7 +305,7 @@ def test_authorized_decryption(
         bob.node_latency_collector.reset_stats(ursula.checksum_address)
         # add a single data point for each ursula: some time between 0.1 and 4
         mock_latency = random.uniform(0.1, 4)
-        bob.node_latency_collector.update_stats(ursula.checksum_address, mock_latency)
+        bob.node_latency_collector._update_stats(ursula.checksum_address, mock_latency)
         latency_stats[ursula.checksum_address] = mock_latency
 
     expected_ursula_request_ordering = sorted(
@@ -76,7 +76,7 @@ def test_collector_stats_obtained(execution_data):
     # update stats for all nodes
     for node, execution_times in executions.items():
         for i, exec_time in enumerate(execution_times):
-            node_latency_collector.update_stats(node, exec_time)
+            node_latency_collector._update_stats(node, exec_time)
 
             # check ongoing average
             subset_of_times = execution_times[: (i + 1)]
@@ -110,7 +110,7 @@ def test_collector_stats_reset(execution_data):
     # update stats for all nodes
     for node, execution_times in executions.items():
         for exec_time in execution_times:
-            node_latency_collector.update_stats(node, exec_time)
+            node_latency_collector._update_stats(node, exec_time)
 
     assert floats_sufficiently_equal(
         node_latency_collector.get_average_latency_time(node),
@@ -179,7 +179,7 @@ def test_collector_simple_concurrency(execution_data):
         for exec_time in execution_times:
             # add some delay for better concurrency
             time.sleep(0.1)
-            node_latency_collector.update_stats(node_address, exec_time)
+            node_latency_collector._update_stats(node_address, exec_time)
 
     # use thread pool
     n_threads = len(executions)
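The thread-pool setup in the concurrency test is cut off by the hunk boundary above. For illustration, a minimal sketch of how such a test can drive the shared collector from one thread per node; only the per-node loop body and `n_threads = len(executions)` come from the diff, while the executor wiring and the `worker` helper are assumptions:

    import time
    from concurrent.futures import ThreadPoolExecutor

    def worker(node_address, execution_times):
        # each worker feeds one node's samples into the shared collector,
        # exercising the Lock inside _update_stats
        for exec_time in execution_times:
            time.sleep(0.1)  # add some delay for better concurrency
            node_latency_collector._update_stats(node_address, exec_time)

    n_threads = len(executions)
    with ThreadPoolExecutor(max_workers=n_threads) as executor:
        futures = [executor.submit(worker, node, times) for node, times in executions.items()]
        for future in futures:
            future.result()  # surface any exceptions raised inside workers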