mirror of https://github.com/nucypher/nucypher.git
Merge pull request #2475 from vepkenez/cloud-fixers (pull/2489/head)
Cloudworkers: bugfixes, better/cleaner envvar support
commit 1b5de0a266
@@ -52,8 +52,7 @@
   become: yes
   become_user: nucypher
   command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYRING_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula config --provider {{ blockchain_provider }} --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}} {{signer_options}} --config-file /root/.local/share/nucypher/ursula.json"
-  environment:
-    NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
+  environment: "{{runtime_envvars}}"

 - name: "Backup Worker Nucypher Keystore locally to: {{deployer_config_path}}/remote_worker_backups/"
   become: yes
@@ -83,11 +82,7 @@
   ports:
     - "9151:9151"
     - "9101:9101"
-  env:
-    NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
-    NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
-    NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
-    NUCYPHER_SENTRY_LOGS: "{{SENTRY_LOGS | default('no')}}"
+  env: "{{runtime_envvars}}"

 - name: "wait a few seconds for the seed node to become available"
   when: SEED_NODE_URI is not undefined
@@ -115,11 +110,8 @@
   ports:
     - "9151:9151"
     - "9101:9101"
-  env:
-    NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
-    NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
-    NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
-    NUCYPHER_SENTRY_LOGS: "{{SENTRY_LOGS | default('no')}}"
+  env: "{{runtime_envvars}}"

 - name: "Get LogPath"
   become: yes
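Note: the three playbook hunks above replace hard-coded `environment:`/`env:` mappings with a single `runtime_envvars` dict rendered into the Ansible inventory per host. A minimal Python sketch of the idea (names and values here are illustrative, not the project's code):

# Sketch: one per-node dict replaces the hard-coded environment blocks.
# Inventory-only defaults are merged with any `-e NAME=VALUE` CLI overrides;
# the playbook then hands the whole dict to the container as its environment.
defaults = {
    'NUCYPHER_KEYRING_PASSWORD': 'hunter2',        # illustrative secret
    'NUCYPHER_WORKER_ETH_PASSWORD': 'hunter2',     # illustrative secret
}
cli_overrides = {'NUCYPHER_MAX_GAS_PRICE_GWEI': '35'}  # from repeated --env flags
runtime_envvars = {**defaults, **cli_overrides}        # overrides win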
@@ -60,10 +60,11 @@ Some examples:

 # --------------------------------------------------------------------------------------------------------------------------- #
 # NOTE: if no --remote-provider is specified, geth will be run on the host and a larger instance with more RAM will be used.
-# this will probably cost more and require some time to sync.
+# this will probably cost more and require some time to sync. * A remote provider such as Alchemy or Infura is highly recommended *
 # --------------------------------------------------------------------------------------------------------------------------- #

 # on AWS
 # configure your local aws cli with named profiles https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html
 $ nucypher cloudworkers up --cloudprovider aws --aws-profile my-aws-profile --remote-provider http://mainnet.infura..3epifj3rfioj

 # add your ubuntu machine at the office to an existing locally managed stake
@@ -86,6 +87,9 @@ Some examples:
 # deploy nucypher on all your managed hosts
 $ nucypher cloudworkers deploy --remote-provider http://mainnet.infura..3epifj3rfioj

+# set some environment variables to configure Ursula workers on all your hosts
+$ nucypher cloudworkers deploy -e NUCYPHER_MAX_GAS_PRICE_GWEI=35 -e DONT_PERFORM_WORK_ON_SUNDAY=true
+
 # print the current status of all workers across all namespaces (in bash)
 $ for ns in $(nucypher cloudworkers list-namespaces); do nucypher cloudworkers status --namespace $ns; done
 > local nickname: Project11-mainnet-2
@@ -0,0 +1 @@
+Fix bad CLI handling in several cloudworkers commands; improved envvar handling.
@@ -53,13 +53,13 @@ def cloudworkers():
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided, nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes. (default is nucypher/nucypher:latest)", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--include-stakeholder', 'stakes', help="limit worker to specified stakeholder addresses", multiple=True)
 @click.option('--wipe', help="Clear nucypher configs on existing nodes and start a fresh node with new keys.", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def up(general_config, staker_options, config_file, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, sentry_dsn, stakes, wipe, prometheus, namespace):
+def up(general_config, staker_options, config_file, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, stakes, wipe, prometheus, namespace, envvars):
     """Creates workers for all stakes owned by the user for the given network."""

     emitter = setup_emitter(general_config)
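For reference, the `--env`/`-e` option added above uses click's `multiple=True`, so the flag can be repeated and arrives as a tuple of strings. A self-contained sketch (the `demo` command is hypothetical, not part of the project):

import click

@click.command()
@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)",
              multiple=True, type=click.STRING, default=[])
def demo(envvars):
    """Echo each NAME=VALUE pair passed via repeated -e flags."""
    for pair in envvars:
        name, value = pair.split('=', 1)  # split only on the first '='
        click.echo(f'{name} = {value}')

if __name__ == '__main__':
    demo()  # e.g. python demo.py -e FOO=1 -e BAR=2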
@@ -78,7 +78,8 @@ def up(general_config, staker_options, config_file, cloudprovider, aws_profile,

     config_file = config_file or StakeHolderConfiguration.default_filepath()

-    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file, remote_provider, nucypher_image, seed_network, sentry_dsn, aws_profile, prometheus, namespace=namespace, network=STAKEHOLDER.network)
+    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file, remote_provider,
+                                                          nucypher_image, seed_network, aws_profile, prometheus, namespace=namespace, network=STAKEHOLDER.network, envvars=envvars)
     if staker_addresses:
         config = deployer.create_nodes(staker_addresses)
@@ -93,13 +94,13 @@ def up(general_config, staker_options, config_file, cloudprovider, aws_profile,
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided, nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes. (default is nucypher/nucypher:latest)", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--count', help="Create this many nodes.", type=click.INT, default=1)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, sentry_dsn, prometheus, count, namespace, network):
+def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, prometheus, count, namespace, network, envvars):
     """Creates the required number of workers to be staked later under a namespace"""

     emitter = setup_emitter(general_config)
@@ -108,10 +109,8 @@ def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher
         emitter.echo("Ansible is required to use this command. (Please run 'pip install ansible'.)", color="red")
         return

-    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, None, None, remote_provider, nucypher_image, seed_network, sentry_dsn, aws_profile, prometheus, namespace=namespace, network=network)
-    if not namespace:
-        emitter.echo("A namespace is required. Choose something to help differentiate between hosts, such as their specific purpose, or even just today's date.", color="red")
-        return
+    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, None, None, remote_provider, nucypher_image, seed_network,
+                                                          aws_profile, prometheus, namespace=namespace, network=network, envvars=envvars)

     names = []
     i = 1
@@ -140,7 +139,7 @@ def add(general_config, host_address, login_name, key_path, ssh_port, host_nickn
     """Adds an existing node to the local config for future management."""

     emitter = setup_emitter(general_config)
-    name = f'{namespace}-{network}-{host_nickname}'
+    name = host_nickname

     deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, namespace=namespace, network=network, action='add')
     config = deployer.create_nodes([name], host_address, login_name, key_path, ssh_port)
@@ -157,7 +156,7 @@ def add(general_config, host_address, login_name, key_path, ssh_port, host_nickn
 @click.option('--ssh-port', help="The port this host's ssh daemon is listening on", default=22)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @group_general_config
-def add_for_stake(general_config, staker_address, host_address, login_name, key_path, ssh_port, namespace):
+def add_for_stake(general_config, staker_options, config_file, staker_address, host_address, login_name, key_path, ssh_port, namespace):
     """Sets an existing node as the host for the given staker address."""

     emitter = setup_emitter(general_config)
@@ -185,15 +184,15 @@ def add_for_stake(general_config, staker_address, host_address, login_name, key_
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--wipe', help="Clear your nucypher config and start a fresh node with new keys", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
 @click.option('--gas-strategy', help="Which gas strategy? (glacial, slow, medium, fast)", type=click.STRING)
 @click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry_dsn, wipe, prometheus, namespace, network, gas_strategy, include_hosts):
+def deploy(general_config, remote_provider, nucypher_image, seed_network, wipe, prometheus, namespace, network, gas_strategy, include_hosts, envvars):
     """Deploys NuCypher on managed hosts."""

     emitter = setup_emitter(general_config)
@@ -202,7 +201,8 @@ def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry
         emitter.echo("Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)", color="red")
         return

-    deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, remote_provider, nucypher_image, seed_network, sentry_dsn, prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy)
+    deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, remote_provider, nucypher_image, seed_network,
+                                                      prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy, envvars=envvars)

     hostnames = deployer.config['instances'].keys()
     if include_hosts:
@@ -216,15 +216,15 @@ def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry
 @click.option('--remote-provider', help="The blockchain provider for the remote node – e.g. an Infura endpoint address. If not provided nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--wipe', help="Clear your nucypher config and start a fresh node with new keys", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
 @click.option('--gas-strategy', help="Which gas strategy? (glacial, slow, medium, fast)", type=click.STRING)
 @click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def update(general_config, remote_provider, nucypher_image, seed_network, sentry_dsn, wipe, prometheus, namespace, network, gas_strategy, include_hosts):
+def update(general_config, remote_provider, nucypher_image, seed_network, wipe, prometheus, namespace, network, gas_strategy, include_hosts, envvars):
     """Updates existing installations of Nucypher on existing managed remote hosts."""

     emitter = setup_emitter(general_config)
@@ -235,11 +235,11 @@ def update(general_config, remote_provider, nucypher_image, seed_network, sentry

     deployer = CloudDeployers.get_deployer('generic')(
         emitter, None, None, remote_provider, nucypher_image,
-        seed_network, sentry_dsn,
-        prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy
+        seed_network,
+        prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy, envvars=envvars
     )

-    emitter.echo(f"found deploying {nucypher_image} on the following existing hosts:")
+    emitter.echo(f"updating the following existing hosts:")

     hostnames = deployer.config['instances'].keys()
     if include_hosts:
@@ -17,6 +17,7 @@

+import copy
 import os
 from pathlib import Path
 import re
 import json
 import maya
@@ -36,7 +37,7 @@ from ansible.executor.playbook_executor import PlaybookExecutor
 from ansible import context as ansible_context
 from ansible.module_utils.common.collections import ImmutableDict

-from nucypher.config.constants import DEFAULT_CONFIG_ROOT
+from nucypher.config.constants import DEFAULT_CONFIG_ROOT, DEPLOY_DIR, NUCYPHER_ENVVAR_KEYRING_PASSWORD, NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD
 from nucypher.blockchain.eth.clients import PUBLIC_CHAINS
 from nucypher.blockchain.eth.networks import NetworksInventory
@@ -173,7 +174,6 @@ class BaseCloudNodeConfigurator:
                  blockchain_provider=None,
                  nucypher_image=None,
                  seed_network=False,
-                 sentry_dsn=None,
                  profile=None,
                  prometheus=False,
                  pre_config=False,
@@ -181,6 +181,7 @@
                  namespace=None,
                  gas_strategy=None,
                  action=None,
+                 envvars=None,
                  ):

         self.emitter = emitter
@@ -188,6 +189,11 @@
         self.network = network
         self.namespace = namespace or 'local-stakeholders'
         self.action = action
+        self.envvars = envvars or []
+        if self.envvars:
+            if not all([ (len(v.split('=')) == 2) for v in self.envvars]):
+                raise ValueError("Improperly specified environment variables: --env variables must be specified in pairs as `<name>=<value>`")
+            self.envvars = [v.split('=') for v in (self.envvars)]

         self.config_filename = f'{self.network}-{self.namespace}.json'
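The validation added to the constructor rejects anything that does not split into exactly one name and one value. A standalone sketch of the same check (note that, as written, a value that itself contains '=' also fails the `len(v.split('=')) == 2` test; `split('=', 1)` would be the more permissive variant):

def parse_envvars(envvars):
    """Validate and split NAME=VALUE pairs the way the constructor above does."""
    if not all(len(v.split('=')) == 2 for v in envvars):
        raise ValueError("Improperly specified environment variables: "
                         "--env variables must be specified in pairs as `<name>=<value>`")
    return [v.split('=') for v in envvars]

assert parse_envvars(['FOO=1', 'BAR=x']) == [['FOO', '1'], ['BAR', 'x']]
# parse_envvars(['BROKEN'])   -> ValueError (no '=')
# parse_envvars(['URL=a=b'])  -> ValueError too, since the value contains '='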
@@ -229,13 +235,11 @@ class BaseCloudNodeConfigurator:
         self.host_level_overrides = {
             'blockchain_provider': blockchain_provider,
             'nucypher_image': nucypher_image,
-            'sentry_dsn': sentry_dsn,
             'gas_strategy': f'--gas-strategy {gas_strategy}' if gas_strategy else '',
         }

         self.config['blockchain_provider'] = blockchain_provider or self.config.get('blockchain_provider') or f'/root/.local/share/geth/.ethereum/{self.chain_name}/geth.ipc'  # the default for nodes that run their own geth container
         self.config['nucypher_image'] = nucypher_image or self.config.get('nucypher_image') or 'nucypher/nucypher:latest'
-        self.config['sentry_dsn'] = sentry_dsn or self.config.get('sentry_dsn')
         self.config['gas_strategy'] = f'--gas-strategy {gas_strategy}' if gas_strategy else self.config.get('gas-strategy', '')

         self.config['seed_network'] = seed_network if seed_network is not None else self.config.get('seed_network')
@@ -262,7 +266,7 @@ class BaseCloudNodeConfigurator:

     @property
     def network_config_path(self):
-        return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, self.network)
+        return Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, self.network)

     @property
     def _provider_deploy_attrs(self):
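This commit also migrates path construction from `os.path.join` to `pathlib`; `Path(...).joinpath(...)` is the drop-in equivalent. A tiny sketch with illustrative values (the constants' real values live in nucypher's config module):

import os
from pathlib import Path

DEFAULT_CONFIG_ROOT = '/home/user/.local/share/nucypher'  # illustrative value
NODE_CONFIG_STORAGE_KEY = 'worker-configs'                # illustrative value

old = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, 'mainnet')
new = Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, 'mainnet')
assert old == str(new)  # same POSIX string, but `new` is a Path object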
@@ -287,18 +291,52 @@ class BaseCloudNodeConfigurator:

     @property
     def inventory_path(self):
-        return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml')
+        return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml'))

-    def generate_ansible_inventory(self, node_names, **kwargs):
+    def update_generate_inventory(self, node_names, **kwargs):

         # filter out the nodes we will not be dealing with
         nodes = {key: value for key, value in self.config['instances'].items() if key in node_names}
         if not nodes:
             raise KeyError(f"No hosts matched the supplied names: {node_names}. Try `nucypher cloudworkers list-hosts`")

+        default_envvars = [
+            (NUCYPHER_ENVVAR_KEYRING_PASSWORD, self.config['keyringpassword']),
+            (NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD, self.config['ethpassword']),
+        ]
+
+        input_envvars = [(k, v) for k, v in self.envvars]
+
+        # populate the specified environment variables as well as the
+        # defaults that are only used in the inventory
+        for key, node in nodes.items():
+            node_vars = nodes[key].get('runtime_envvars', {})
+            for k, v in input_envvars:
+                node_vars.update({k: v})
+            nodes[key]['runtime_envvars'] = node_vars
+
+            # we want to update the config with the specified envvars
+            # so they will persist in future invocations
+            self.config['instances'][key] = copy.deepcopy(nodes[key])
+
+        # we don't want to save the default_envvars to the config file
+        # but we do want them to be specified to the inventory template
+        # but overridden on a per node basis if previously specified
+        for key, node in nodes.items():
+            for k, v in default_envvars:
+                if not k in nodes[key]['runtime_envvars']:
+                    nodes[key]['runtime_envvars'][k] = v
+
         inventory_content = self._inventory_template.render(
             deployer=self,
-            nodes=[value for key, value in self.config['instances'].items() if key in node_names],
+            nodes=nodes.values(),
             extra=kwargs
         )

         with open(self.inventory_path, 'w') as outfile:
             outfile.write(inventory_content)

+        # now that everything rendered correctly, save how we got there.
+        self._write_config()
+
         return self.inventory_path
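The merge order in `update_generate_inventory` gives CLI `-e` values priority and persistence, while the keyring/eth-password defaults are inventory-only and never overwrite a value a node already has. A condensed sketch of that precedence (function and argument names here are illustrative):

def merged_envvars_for_node(node, cli_envvars, default_envvars):
    """CLI values are written into the node's persisted runtime_envvars;
    defaults are applied last and only where no value exists yet."""
    node_vars = dict(node.get('runtime_envvars', {}))
    node_vars.update(cli_envvars)        # persisted back to the config file
    rendered = dict(node_vars)           # what the inventory template sees
    for k, v in default_envvars.items():
        rendered.setdefault(k, v)        # defaults never clobber
    return rendered

node = {'runtime_envvars': {'NUCYPHER_KEYRING_PASSWORD': 'per-node-override'}}
out = merged_envvars_for_node(node,
                              {'NUCYPHER_MAX_GAS_PRICE_GWEI': '35'},
                              {'NUCYPHER_KEYRING_PASSWORD': 'global-default'})
assert out['NUCYPHER_KEYRING_PASSWORD'] == 'per-node-override'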
@@ -338,7 +376,7 @@ class BaseCloudNodeConfigurator:

     def deploy_nucypher_on_existing_nodes(self, node_names, wipe_nucypher=False):

-        playbook = 'deploy/ansible/worker/setup_remote_workers.yml'
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/setup_remote_workers.yml')

         # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -361,7 +399,7 @@ class BaseCloudNodeConfigurator:
         self.config['seed_node'] = list(self.config['instances'].values())[0]['publicaddress']
         self._write_config()

-        self.generate_ansible_inventory(node_names, wipe_nucypher=wipe_nucypher)
+        self.update_generate_inventory(node_names, wipe_nucypher=wipe_nucypher)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -381,10 +419,9 @@ class BaseCloudNodeConfigurator:
         self.update_captured_instance_data(self.output_capture)
         self.give_helpful_hints(node_names, backup=True, playbook=playbook)

-
     def update_nucypher_on_existing_nodes(self, node_names):

-        playbook = 'deploy/ansible/worker/update_remote_workers.yml'
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/update_remote_workers.yml')

         # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -402,7 +439,7 @@ class BaseCloudNodeConfigurator:
         self.config['seed_node'] = list(self.config['instances'].values())[0]['publicaddress']
         self._write_config()

-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -422,12 +459,11 @@ class BaseCloudNodeConfigurator:
         self.update_captured_instance_data(self.output_capture)
         self.give_helpful_hints(node_names, backup=True, playbook=playbook)

-
     def get_worker_status(self, node_names):

-        playbook = 'deploy/ansible/worker/get_workers_status.yml'
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/get_workers_status.yml')

-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -447,12 +483,11 @@ class BaseCloudNodeConfigurator:

         self.give_helpful_hints(node_names, playbook=playbook)

-
     def print_worker_logs(self, node_names):

-        playbook = 'deploy/ansible/worker/get_worker_logs.yml'
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/get_worker_logs.yml')

-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -472,11 +507,10 @@ class BaseCloudNodeConfigurator:

         self.give_helpful_hints(node_names, playbook=playbook)

-
     def backup_remote_data(self, node_names):

-        playbook = 'deploy/ansible/worker/backup_remote_workers.yml'
-        self.generate_ansible_inventory(node_names)
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/backup_remote_workers.yml')
+        self.update_generate_inventory(node_names)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -497,9 +531,9 @@ class BaseCloudNodeConfigurator:

     def restore_from_backup(self, target_host, source_path):

-        playbook = 'deploy/ansible/worker/restore_ursula_from_backup.yml'
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/restore_ursula_from_backup.yml')

-        self.generate_ansible_inventory([target_host], restore_path=source_path)
+        self.update_generate_inventory([target_host], restore_path=source_path)

         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -585,7 +619,7 @@ class BaseCloudNodeConfigurator:
         )
         self.emitter.echo(f"\t{dep.format_ssh_cmd(host_data)}", color="yellow")
     if backup:
-        self.emitter.echo(" *** Local backups containing sensitive data have been created. ***", color="red")
+        self.emitter.echo(" *** Local backups containing sensitive data may have been created. ***", color="red")
         self.emitter.echo(f" Backup data can be found here: {self.config_dir}/remote_worker_backups/")

     def format_ssh_cmd(self, host_data):
@@ -783,7 +817,7 @@ class AWSNodeConfigurator(BaseCloudNodeConfigurator):

     def _create_keypair(self):
         new_keypair_data = self.ec2Client.create_key_pair(KeyName=f'{self.namespace_network}')
-        outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
+        outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
         os.makedirs(os.path.dirname(outpath), exist_ok=True)
         with open(outpath, 'w') as outfile:
             outfile.write(new_keypair_data['KeyMaterial'])
@@ -796,7 +830,7 @@ class AWSNodeConfigurator(BaseCloudNodeConfigurator):
         # only use self.namespace here to avoid accidental deletions of pre-existing keypairs
         deleted_keypair_data = self.ec2Client.delete_key_pair(KeyName=f'{self.namespace_network}')
         if deleted_keypair_data['HTTPStatusCode'] == 200:
-            outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
+            outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
             os.remove(outpath)
             self.emitter.echo(f"keypair at {outpath}, was deleted", color='yellow')
@@ -14,8 +14,6 @@ all:
       etherscan_domain: ${deployer.chain_name}.etherscan.io
       ansible_python_interpreter: /usr/bin/python3
       ansible_connection: ssh
-      NUCYPHER_KEYRING_PASSWORD: ${deployer.config['keyringpassword']}
-      NUCYPHER_WORKER_ETH_PASSWORD: ${deployer.config['ethpassword']}
       nucypher_image: ${deployer.config['nucypher_image']}
      gas_strategy: ${deployer.config['gas_strategy']}
       blockchain_provider: ${deployer.config['blockchain_provider']}
@@ -32,17 +30,13 @@ all:
       SEED_NODE_URI:
       teacher_options: ""
     %endif
-    %if deployer.config.get('sentry_dsn'):
-      SENTRY_DSN: ${deployer.config['sentry_dsn']}
-      NUCYPHER_SENTRY_LOGS: yes
-    %endif
       wipe_nucypher_config: ${extra.get('wipe_nucypher', False)}
       deployer_config_path: ${deployer.config_dir}
       restore_path: ${extra.get('restore_path')}
   hosts:
   %for node in nodes:
     ${node['publicaddress']}:
-      host_nickname: ${node['host_nickname']}
+      host_nickname: "${node['host_nickname']}"
     %for attr in node['provider_deploy_attrs']:
       ${attr['key']}: ${attr['value']}
     %endfor
@@ -52,10 +46,11 @@ all:
     %if node.get('nucypher_image'):
       nucypher_image: ${node['nucypher_image']}
     %endif
-    %if node.get('sentry_dsn'):
-      sentry_dsn: ${node['sentry_dsn']}
-    %endif
     %if node.get('gas_strategy'):
       gas_strategy: ${node['gas_strategy']}
     %endif
+      runtime_envvars:
+      %for key, val in node['runtime_envvars'].items():
+        ${key}: "${val}"
+      %endfor
   %endfor
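The `${...}`/`%for` syntax above is Mako template syntax; the new `runtime_envvars` loop emits one quoted YAML entry per variable. A minimal rendering sketch (assuming the `mako` package, which this syntax implies):

from mako.template import Template

snippet = Template(
    "runtime_envvars:\n"
    "%for key, val in node['runtime_envvars'].items():\n"
    "  ${key}: \"${val}\"\n"
    "%endfor\n")

node = {'runtime_envvars': {'NUCYPHER_KEYRING_PASSWORD': 'hunter2'}}
print(snippet.render(node=node))
# runtime_envvars:
#   NUCYPHER_KEYRING_PASSWORD: "hunter2"

Quoting the rendered values (as the `host_nickname` fix above also does) keeps the generated YAML valid for values containing special characters.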