mirror of https://github.com/nucypher/nucypher.git
Get rid of the KFrag storage step, both in server and client
parent fed1b4aba8
commit f1a24c6865
@@ -194,13 +194,6 @@ class RestMiddleware:
                                    timeout=2)
        return response

    def enact_policy(self, ursula, kfrag_id, payload):
        response = self.client.post(node_or_sprout=ursula,
                                    path=f'kFrag/{kfrag_id.hex()}',
                                    data=payload,
                                    timeout=2)
        return response

    def reencrypt(self, work_order):
        ursula_rest_response = self.send_work_order_payload_to_ursula(work_order)
        splitter = cfrag_splitter + signature_splitter
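The `enact_policy` middleware method removed above was a thin wrapper around an HTTP POST of the encrypted enactment payload to Ursula's `/kFrag/<id>` endpoint. For reference, a minimal sketch of that round-trip using plain `requests`; the URL, TLS handling, and payload values below are illustrative assumptions, not part of this commit.

# Minimal sketch (assumptions noted in comments): the removed
# RestMiddleware.enact_policy boiled down to this HTTP round-trip.
import requests

def enact_policy_raw(ursula_rest_url: str, kfrag_id: bytes, payload: bytes) -> requests.Response:
    """POST an encrypted enactment payload to Ursula's kFrag endpoint."""
    return requests.post(
        f"{ursula_rest_url}/kFrag/{kfrag_id.hex()}",  # same path scheme as the removed method
        data=payload,                                 # UmbralMessageKit bytes in the real flow
        timeout=2,                                    # mirrors the middleware's 2-second timeout
        verify=False,                                 # assumption: Ursulas serve self-signed TLS
    )

# Hypothetical usage:
# response = enact_policy_raw("https://localhost:9151", b"\x01" * 32, b"<message kit bytes>")
# print(response.status_code)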
@@ -23,25 +23,17 @@ import weakref
from datetime import datetime, timedelta
from typing import Tuple

from bytestring_splitter import BytestringSplitter
from constant_sorrow import constants
from constant_sorrow.constants import (
    FLEET_STATES_MATCH,
    NO_BLOCKCHAIN_CONNECTION,
    RELAX
)
from constant_sorrow.constants import FLEET_STATES_MATCH, RELAX
from flask import Flask, Response, jsonify, request
from mako import exceptions as mako_exceptions
from mako.template import Template
from maya import MayaDT
from web3.exceptions import TimeExhausted

from nucypher.config.constants import MAX_UPLOAD_CONTENT_LENGTH
from nucypher.crypto.keypairs import HostingKeypair
from nucypher.crypto.kits import UmbralMessageKit
from nucypher.crypto.powers import KeyPairBasedPower, PowerUpError
from nucypher.crypto.signing import InvalidSignature
from nucypher.crypto.umbral_adapter import KeyFrag, VerificationError
from nucypher.crypto.utils import canonical_address_from_umbral_key
from nucypher.datastore.datastore import Datastore, RecordNotFound, DatastoreTransactionError
from nucypher.datastore.models import PolicyArrangement, TreasureMap, Workorder
@@ -219,58 +211,6 @@ def _make_rest_app(datastore: Datastore, this_node, domain: str, log: Logger) ->
        # TODO: Make this a legit response #234.
        return Response(b"This will eventually be an actual acceptance of the arrangement.", headers=headers)

    @rest_app.route("/kFrag/<id_as_hex>", methods=['POST'])
    def set_policy(id_as_hex):
        """
        REST endpoint for setting a kFrag.
        """
        policy_message_kit = UmbralMessageKit.from_bytes(request.data)

        alices_verifying_key = policy_message_kit.sender_verifying_key
        alice = _alice_class.from_public_keys(verifying_key=alices_verifying_key)

        try:
            cleartext = this_node.verify_from(alice, policy_message_kit, decrypt=True)
        except InvalidSignature:
            # TODO: Perhaps we log this? Essentially 355.
            return Response("Invalid Signature", status_code=400)

        if not this_node.federated_only:
            # This splitter probably belongs somewhere canonical.
            transaction_splitter = BytestringSplitter(32)
            tx, kfrag_bytes = transaction_splitter(cleartext, return_remainder=True)

            try:
                # Get all of the arrangements and verify that we'll be paid.
                # TODO: We'd love for this part to be impossible to reduce the risk of collusion. #1274
                arranged_addresses = this_node.policy_agent.fetch_arrangement_addresses_from_policy_txid(tx, timeout=this_node.synchronous_query_timeout)
            except TimeExhausted:
                # Alice didn't pay. Return response with that weird status code.
                this_node.suspicious_activities_witnessed['freeriders'].append((alice, f"No transaction matching {tx}."))
                return Response(f"No paid transaction matching {tx} for this node", status=402)

            this_node_has_been_arranged = this_node.checksum_address in arranged_addresses
            if not this_node_has_been_arranged:
                this_node.suspicious_activities_witnessed['freeriders'].append((alice, f"The transaction {tx} does not list me as a Worker - it lists {arranged_addresses}."))
                return Response(f"This node was not listed as servicing the policy in transaction {tx}", status=402)
        else:
            _tx = NO_BLOCKCHAIN_CONNECTION
            kfrag_bytes = cleartext
        kfrag = KeyFrag.from_bytes(kfrag_bytes)

        try:
            verified_kfrag = kfrag.verify(verifying_pk=alices_verifying_key)
        except VerificationError:
            return Response(f"Signature on {kfrag} is invalid", status=403)

        with datastore.describe(PolicyArrangement, id_as_hex, writeable=True) as policy_arrangement:
            if not policy_arrangement.alice_verifying_key == alice.stamp.as_umbral_pubkey():
                return Response("Policy arrangement's signing key does not match sender's", status=403)
            policy_arrangement.kfrag = verified_kfrag

        # TODO: Sign the arrangement here. #495
        return ""  # TODO: Return A 200, with whatever policy metadata.

    @rest_app.route('/kFrag/<id_as_hex>', methods=["DELETE"])
    def revoke_arrangement(id_as_hex):
        """
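In blockchain mode, the removed `set_policy` endpoint expected the decrypted payload to carry a 32-byte policy transaction id followed by the serialized KFrag, split off with `BytestringSplitter(32)`. A minimal sketch of that framing using plain slicing; the function name and trailing comments are illustrative stand-ins, not project API.

from typing import Tuple

def split_enactment_payload(cleartext: bytes) -> Tuple[bytes, bytes]:
    """Split a decrypted enactment payload into (policy tx id, kfrag bytes).

    Plain slicing stands in for BytestringSplitter(32); this helper is a
    hypothetical illustration, not code from the repository.
    """
    if len(cleartext) < 32:
        raise ValueError("payload too short to contain a 32-byte transaction id")
    tx, kfrag_bytes = cleartext[:32], cleartext[32:]
    return tx, kfrag_bytes

# In the removed endpoint, kfrag_bytes was then parsed and verified:
#   KeyFrag.from_bytes(kfrag_bytes).verify(verifying_pk=alices_verifying_key)
# and a VerificationError produced a 403 response.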
@@ -335,64 +335,10 @@ class Policy(ABC):
                            timeout: int = 10,
                            ):
        """
        Attempts to distribute kfrags to Ursulas that accepted arrangements earlier.
        Nothing to do here (FIXME)
        """

        def worker(ursula_and_kfrag):
            ursula, kfrag = ursula_and_kfrag
            arrangement = arrangements[ursula]

            # TODO: seems like it would be enough to just encrypt this with Ursula's public key,
            # and not create a whole capsule.
            # Can't change for now since it's node protocol.
            payload = self._make_enactment_payload(publication_transaction, kfrag)
            message_kit, _signature = self.alice.encrypt_for(ursula, payload)

            try:
                # TODO: Concurrency
                response = network_middleware.enact_policy(ursula,
                                                           arrangement.id,
                                                           message_kit.to_bytes())
            except network_middleware.UnexpectedResponse as e:
                status = e.status
            else:
                status = response.status_code

            return status

        value_factory = AllAtOnceFactory(list(zip(arrangements, self.kfrags)))
        worker_pool = WorkerPool(worker=worker,
                                 value_factory=value_factory,
                                 target_successes=self.n,
                                 timeout=timeout,
                                 threadpool_size=self.n)

        worker_pool.start()

        # Block until everything is complete. We need all the workers to finish.
        worker_pool.join()

        successes = worker_pool.get_successes()

        if len(successes) != self.n:
            raise Policy.EnactmentError()

        # TODO: Enable re-tries?
        statuses = {ursula_and_kfrag[0].checksum_address: status for ursula_and_kfrag, status in successes.items()}
        if not all(status == 200 for status in statuses.values()):
            report = "\n".join(f"{address}: {status}" for address, status in statuses.items())
            self.log.debug(f"Policy enactment failed. Request statuses:\n{report}")

            # OK, let's check: if two or more Ursulas claimed we didn't pay,
            # we need to re-evaulate our situation here.
            number_of_claims_of_freeloading = sum(status == 402 for status in statuses.values())

            # TODO: a better exception here?
            if number_of_claims_of_freeloading > 2:
                raise self.alice.NotEnoughNodes

            # otherwise just raise a more generic error
            raise Policy.EnactmentError()
        pass

    def _make_treasure_map(self,
                           network_middleware: RestMiddleware,
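The removed client-side enactment loop fanned one request per Ursula out through a WorkerPool, required `self.n` successes, and otherwise raised `Policy.EnactmentError` (or `NotEnoughNodes` after more than two 402 "not paid" responses). A rough sketch of the same fan-out-and-count-successes pattern using only the standard library; every name here is a hypothetical stand-in, not the project's WorkerPool API.

from concurrent.futures import ThreadPoolExecutor, as_completed

class EnactmentError(Exception):
    """Raised when fewer than n Ursulas accepted the policy."""

def enact_all(ursulas_and_kfrags, post_enactment, n: int, timeout: float = 10.0) -> dict:
    """Fan out one enactment request per (ursula, kfrag) pair and count 200s."""
    statuses = {}
    with ThreadPoolExecutor(max_workers=n) as pool:
        futures = {pool.submit(post_enactment, ursula, kfrag): ursula
                   for ursula, kfrag in ursulas_and_kfrags}
        for future in as_completed(futures, timeout=timeout):
            ursula = futures[future]
            try:
                statuses[ursula] = future.result()  # expected to be an HTTP status code
            except Exception:
                statuses[ursula] = None
    successes = sum(status == 200 for status in statuses.values())
    if successes < n:
        # The removed code distinguished >2 "not paid" (402) responses
        # (raising NotEnoughNodes) before falling back to a generic error.
        raise EnactmentError(f"only {successes}/{n} Ursulas accepted the policy")
    return statuses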