mirror of https://github.com/nucypher/nucypher.git
Deprecate S3Bucket NodeStorage; Finish moving Banners.
parent cec810966d
commit 30220a4ea1

@@ -23,7 +23,8 @@ from constant_sorrow.constants import NO_PASSWORD
 from twisted.logger import Logger
 from twisted.logger import globalLogPublisher
 
-from nucypher.cli.painting import BANNER
+from nucypher.characters.banners import BANNER
+from nucypher.config.constants import NUCYPHER_SENTRY_ENDPOINT
 from nucypher.utilities.logging import (
     logToSentry,
     getTextFileObserver,
@@ -33,7 +34,7 @@ from nucypher.utilities.logging import (
 
 class NucypherClickConfig:
 
-    __sentry_endpoint = "https://d8af7c4d692e4692a455328a280d845e@sentry.io/1310685"  # TODO: Use nucypher domain
+    __sentry_endpoint = NUCYPHER_SENTRY_ENDPOINT
 
     # Environment Variables
     config_file = os.environ.get('NUCYPHER_CONFIG_FILE', None)
@@ -27,7 +27,7 @@ from twisted.logger import globalLogPublisher
 from nucypher.blockchain.eth.actors import Deployer
 from nucypher.blockchain.eth.chains import Blockchain
 from nucypher.blockchain.eth.interfaces import BlockchainInterface
-from nucypher.cli.painting import BANNER
+from nucypher.characters.banners import BANNER
 from nucypher.cli.types import EIP55_CHECKSUM_ADDRESS, EXISTING_READABLE_FILE
 from nucypher.config.constants import DEFAULT_CONFIG_ROOT
 from nucypher.utilities.logging import getTextFileObserver
@@ -19,9 +19,10 @@ along with nucypher. If not, see <https://www.gnu.org/licenses/>.
 
 import click
 
-from nucypher.cli import moe, ursula, status, alice, bob, enrico
+from nucypher.characters.banners import BANNER
+from nucypher.cli import status
+from nucypher.cli.characters import moe, bob, ursula, alice, enrico
 from nucypher.cli.config import echo_version, nucypher_click_config
-from nucypher.cli.painting import BANNER
 from nucypher.utilities.logging import GlobalConsoleLogger
 
 GlobalConsoleLogger.start_if_not_started()
@@ -24,24 +24,6 @@ from constant_sorrow.constants import NO_KNOWN_NODES
 from nucypher.config.characters import UrsulaConfiguration
 from nucypher.config.constants import SEEDNODES
 
-#
-# Art
-#
-
-BANNER = """
-                              _
-                             | |
- _ __  _   _  ___ _   _ _ __ | |__   ___ _ __
-| '_ \| | | |/ __| | | | '_ \| '_ \ / _ \ '__|
-| | | | |_| | (__| |_| | |_) | | | |  __/ |
-|_| |_|\__,_|\___|\__, | .__/|_| |_|\___|_|
-                   __/ | |
-                  |___/|_|
-
-version {}
-
-""".format(nucypher.__version__)
-
 
 #
 # Paint
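
A minimal usage sketch (not part of this commit): after the move, CLI code prints the art from its new module. Only the import path nucypher.characters.banners.BANNER and the project's existing use of click are taken from this diff; the helper name paint_banner is hypothetical.

import click

from nucypher.characters.banners import BANNER


def paint_banner() -> None:
    # Echo the relocated ASCII banner; BANNER already embeds the version string.
    click.echo(BANNER)
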
@@ -44,7 +44,12 @@ SEEDNODES = tuple()
 
 
 # Domains
-#If this domain is among those being learned or served, then domain checking is skipped.
-#A Learner learning about the GLOBAL_DOMAIN will learn about all nodes.
-#A Teacher serving the GLOBAL_DOMAIN will teach about all nodes.
+"""
+If this domain is among those being learned or served, then domain checking is skipped.
+A Learner learning about the GLOBAL_DOMAIN will learn about all nodes.
+A Teacher serving the GLOBAL_DOMAIN will teach about all nodes.
+"""
 GLOBAL_DOMAIN = b'GLOBAL_DOMAIN'
+
+# Sentry
+NUCYPHER_SENTRY_ENDPOINT = "https://d8af7c4d692e4692a455328a280d845e@sentry.io/1310685"  # TODO: Use nucypher DNS domain
@@ -522,86 +522,8 @@ class TemporaryFileBasedNodeStorage(LocalFileBasedNodeStorage):
         return bool(os.path.isdir(self.metadata_dir) and os.path.isdir(self.certificates_dir))
 
 
-class S3NodeStorage(NodeStorage):
-    _name = 's3'
-    S3_ACL = 'private'  # Canned S3 Permissions
-
-    def __init__(self,
-                 bucket_name: str,
-                 s3_resource=None,
-                 *args, **kwargs) -> None:
-
-        super().__init__(*args, **kwargs)
-        self.__bucket_name = bucket_name
-        self.__s3client = boto3.client('s3')
-        self.__s3resource = s3_resource or boto3.resource('s3')
-        self.__bucket = NO_STORAGE_AVAILIBLE
-
-    @property
-    def bucket(self):
-        return self.__bucket
-
-    @property
-    def bucket_name(self):
-        return self.__bucket_name
-
-    def __read(self, node_obj: str):
-        try:
-            node_object_metadata = node_obj.get()
-        except ClientError:
-            raise self.UnknownNode
-        node_bytes = self.deserializer(node_object_metadata['Body'].read())
-        node = self.character_class.from_bytes(node_bytes)
-        return node
-
-    @validate_checksum_address
-    def generate_presigned_url(self, checksum_address: str) -> str:
-        payload = {'Bucket': self.__bucket_name, 'Key': checksum_address}
-        url = self.__s3client.generate_presigned_url('get_object', payload, ExpiresIn=900)
-        return url
-
-    def all(self, federated_only: bool, certificates_only: bool = False) -> set:
-        node_objs = self.__bucket.objects.all()
-        nodes = set()
-        for node_obj in node_objs:
-            node = self.__read(node_obj=node_obj)
-            nodes.add(node)
-        return nodes
-
-    @validate_checksum_address
-    def get(self, checksum_address: str, federated_only: bool):
-        node_obj = self.__bucket.Object(checksum_address)
-        node = self.__read(node_obj=node_obj)
-        return node
-
-    def store_node_metadata(self, node):
-        self.__s3client.put_object(Bucket=self.__bucket_name,
-                                   ACL=self.S3_ACL,
-                                   Key=node.checksum_public_address,
-                                   Body=self.serializer(bytes(node)))
-
-    @validate_checksum_address
-    def remove(self, checksum_address: str) -> bool:
-        node_obj = self.__bucket.Object(checksum_address)
-        response = node_obj.delete()
-        if response['ResponseMetadata']['HTTPStatusCode'] != 204:
-            raise self.NodeStorageError("S3 Storage failed to delete node {}".format(checksum_address))
-        return True
-
-    def payload(self) -> dict:
-        payload = {
-            self._TYPE_LABEL: self._name,
-            'bucket_name': self.__bucket_name
-        }
-        return payload
-
-    @classmethod
-    def from_payload(cls, payload: dict, *args, **kwargs):
-        return cls(bucket_name=payload['bucket_name'], *args, **kwargs)
-
-    def initialize(self):
-        self.__bucket = self.__s3resource.Bucket(self.__bucket_name)
-
-
-### Node Storage Registry ###
-NODE_STORAGES = {storage_class._name: storage_class for storage_class in NodeStorage.__subclasses__()}
+#
+# Node Storage Registry
+#
+NODE_STORAGES = {storage_class._name: storage_class
+                 for storage_class in NodeStorage.__subclasses__()}
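
A short sketch of how the rebuilt NODE_STORAGES registry can be used to restore a backend from its serialized payload. The names _name, _TYPE_LABEL, payload() and from_payload() appear in the diff above; the restore_storage helper and its exact key handling are assumptions.

from nucypher.config.storages import NODE_STORAGES, NodeStorage


def restore_storage(payload: dict, *args, **kwargs) -> NodeStorage:
    # Each NodeStorage subclass is registered under its _name label;
    # payload() records that label under the class's _TYPE_LABEL key.
    storage_class = NODE_STORAGES[payload[NodeStorage._TYPE_LABEL]]
    return storage_class.from_payload(payload, *args, **kwargs)
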
@@ -15,14 +15,10 @@ You should have received a copy of the GNU Affero General Public License
 along with nucypher. If not, see <https://www.gnu.org/licenses/>.
 """
 
-import boto3
 import pytest
-import requests
-from moto import mock_s3
 
 from nucypher.characters.lawful import Ursula
 from nucypher.config.storages import (
-    S3NodeStorage,
    ForgetfulNodeStorage,
    TemporaryFileBasedNodeStorage,
    NodeStorage
@@ -96,11 +92,9 @@ class BaseTestNodeStorageBackends:
         assert all_stored_nodes == set()
         return True
 
-
     #
     # Storage Backend Tests
    #
-
     def test_delete_node_in_storage(self, light_ursula):
        assert self._write_and_delete_metadata(ursula=light_ursula, node_storage=self.storage_backend)
 
@@ -118,87 +112,3 @@ class TestTemporaryFileBasedNodeStorage(BaseTestNodeStorageBackends):
     storage_backend = TemporaryFileBasedNodeStorage(character_class=BaseTestNodeStorageBackends.character_class,
                                                     federated_only=BaseTestNodeStorageBackends.federated_only)
     storage_backend.initialize()
-
-
-@pytest.mark.skip("Fails after moto / boto update: Needs investigation")
-class TestS3NodeStorageDirect(BaseTestNodeStorageBackends):
-
-    @mock_s3
-    def setup_class(self):
-        conn = boto3.resource('s3')
-        # We need to create the __bucket since this is all in Moto's 'virtual' AWS account
-        conn.create_bucket(Bucket=MOCK_S3_BUCKET_NAME, ACL=S3NodeStorage.S3_ACL)
-
-        storage_backend = S3NodeStorage(character_class=BaseTestNodeStorageBackends.character_class,
-                                        federated_only=BaseTestNodeStorageBackends.federated_only,
-                                        bucket_name=MOCK_S3_BUCKET_NAME,
-                                        s3_resource=conn
-                                        )
-        storage_backend.initialize()
-
-    @mock_s3
-    def test_generate_presigned_url(self, light_ursula):
-        s3_node_storage = self.s3_node_storage_factory()
-        s3_node_storage.store_node_metadata(node=light_ursula)
-        presigned_url = s3_node_storage.generate_presigned_url(checksum_address=light_ursula.checksum_public_address)
-
-        assert S3_DOMAIN_NAME in presigned_url
-        assert MOCK_S3_BUCKET_NAME in presigned_url
-        assert light_ursula.checksum_public_address in presigned_url
-
-        moto_response = requests.get(presigned_url)
-        assert moto_response.status_code == 200
-
-    @mock_s3
-    def test_read_and_write_to_storage(self, light_ursula):
-        s3_node_storage = self.s3_node_storage_factory()
-
-        # Write Node
-        s3_node_storage.store_node_metadata(node=light_ursula)
-
-        # Read Node
-        node_from_storage = s3_node_storage.get(checksum_address=light_ursula.checksum_public_address,
-                                                federated_only=True)
-        assert light_ursula == node_from_storage, "Node storage {} failed".format(s3_node_storage)
-
-        # Save more nodes
-        all_known_nodes = set()
-        for port in range(10152, 10251):
-            node = Ursula(rest_host='127.0.0.1', rest_port=port, federated_only=True)
-            s3_node_storage.store_node_metadata(node=node)
-            all_known_nodes.add(node)
-
-        # Read all nodes from storage
-        all_stored_nodes = s3_node_storage.all(federated_only=True)
-        all_known_nodes.add(light_ursula)
-        assert len(all_known_nodes) == len(all_stored_nodes)
-        assert all_stored_nodes == all_known_nodes
-
-        # Read random nodes
-        for i in range(3):
-            random_node = all_known_nodes.pop()
-            random_node_from_storage = s3_node_storage.get(checksum_address=random_node.checksum_public_address,
-                                                           federated_only=True)
-            assert random_node.checksum_public_address == random_node_from_storage.checksum_public_address
-
-        return True
-
-    @mock_s3
-    def test_write_and_delete_nodes_in_storage(self, light_ursula):
-        s3_node_storage = self.s3_node_storage_factory()
-
-        # Write Node
-        s3_node_storage.store_node_metadata(node=light_ursula)
-
-        # Delete Node
-        s3_node_storage.remove(checksum_address=light_ursula.checksum_public_address)
-
-        # Read Node
-        with pytest.raises(NodeStorage.UnknownNode):
-            _node_from_storage = s3_node_storage.get(checksum_address=light_ursula.checksum_public_address,
-                                                     federated_only=True)
-
-        # Read all nodes from storage
-        all_stored_nodes = s3_node_storage.all(federated_only=True)
-        assert all_stored_nodes == set()
-        return True
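
With the S3 cases removed, a hedged sketch of an equivalent round-trip check against a surviving backend. The API names used below (TemporaryFileBasedNodeStorage, initialize, store_node_metadata, get, Ursula's constructor, checksum_public_address) all appear elsewhere in this diff; the test name, the fixed port, and constructing Ursula directly instead of using the light_ursula fixture are assumptions.

from nucypher.characters.lawful import Ursula
from nucypher.config.storages import TemporaryFileBasedNodeStorage


def test_temporary_storage_round_trip():
    # Backend is created the same way the surviving test class does above.
    storage = TemporaryFileBasedNodeStorage(character_class=Ursula, federated_only=True)
    storage.initialize()

    # Write one federated node, then read it back by checksum address.
    node = Ursula(rest_host='127.0.0.1', rest_port=10151, federated_only=True)
    storage.store_node_metadata(node=node)
    restored = storage.get(checksum_address=node.checksum_public_address, federated_only=True)

    assert node.checksum_public_address == restored.checksum_public_address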