From 1dbe6a22b11023b22424911a0ea38c697c507459 Mon Sep 17 00:00:00 2001
From: damon
Date: Fri, 11 Dec 2020 04:27:30 -0800
Subject: [PATCH 1/9] fix cli args

https://github.com/nucypher/nucypher/issues/2473
---
 nucypher/cli/commands/cloudworkers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nucypher/cli/commands/cloudworkers.py b/nucypher/cli/commands/cloudworkers.py
index 1014335f8..5633a1850 100644
--- a/nucypher/cli/commands/cloudworkers.py
+++ b/nucypher/cli/commands/cloudworkers.py
@@ -157,7 +157,7 @@ def add(general_config, host_address, login_name, key_path, ssh_port, host_nickn
 @click.option('--ssh-port', help="The port this host's ssh daemon is listening on", default=22)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @group_general_config
-def add_for_stake(general_config, staker_address, host_address, login_name, key_path, ssh_port, namespace):
+def add_for_stake(general_config, staker_options, config_file, staker_address, host_address, login_name, key_path, ssh_port, namespace):
     """Sets an existing node as the host for the given staker address."""
 
     emitter = setup_emitter(general_config)

From a41429630a52f9c000e9dbbe64e776252a8d6236 Mon Sep 17 00:00:00 2001
From: damon
Date: Fri, 11 Dec 2020 18:04:47 -0800
Subject: [PATCH 2/9] per node envvars: fixes
 https://github.com/nucypher/nucypher/issues/2472

---
 deploy/ansible/worker/include/run_ursula.yml | 16 ++---
 nucypher/cli/commands/cloudworkers.py        | 34 +++++-----
 nucypher/utilities/clouddeploy.py            | 63 ++++++++++++++-----
 .../cloud_deploy_ansible_inventory.mako      | 15 ++---
 4 files changed, 73 insertions(+), 55 deletions(-)

diff --git a/deploy/ansible/worker/include/run_ursula.yml b/deploy/ansible/worker/include/run_ursula.yml
index bc7af5496..45271faef 100644
--- a/deploy/ansible/worker/include/run_ursula.yml
+++ b/deploy/ansible/worker/include/run_ursula.yml
@@ -52,8 +52,7 @@
   become: yes
   become_user: nucypher
   command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYRING_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula config --provider {{ blockchain_provider }} --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}} {{signer_options}} --config-file /root/.local/share/nucypher/ursula.json"
-  environment:
-    NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
+  environment: "{{runtime_envvars}}"
 
 - name: "Backup Worker Nucypher Keystore locally to: {{deployer_config_path}}/remote_worker_backups/"
   become: yes
@@ -83,11 +82,7 @@
     ports:
       - "9151:9151"
      - "9101:9101"
-    env:
-      NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
-      NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
-      NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
-      NUCYPHER_SENTRY_LOGS: "{{SENTRY_LOGS | default('no')}}"
+    env: "{{runtime_envvars}}"
 
 - name: "wait a few seconds for the seed node to become available"
   when: SEED_NODE_URI is not undefined
@@ -115,11 +110,8 @@
     ports:
      - "9151:9151"
      - "9101:9101"
-    env:
-      NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
-      NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
-      NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
-      NUCYPHER_SENTRY_LOGS: "{{SENTRY_LOGS | default('no')}}"
+    env: "{{runtime_envvars}}"
+
 
 - name: "Get LogPath"
   become: yes
diff --git a/nucypher/cli/commands/cloudworkers.py b/nucypher/cli/commands/cloudworkers.py
index 5633a1850..1ca8a26fe 100644
--- a/nucypher/cli/commands/cloudworkers.py
+++ b/nucypher/cli/commands/cloudworkers.py
@@ -53,13 +53,13 @@ def cloudworkers():
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided, nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes. (default is nucypher/nucypher:latest)", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--include-stakeholder', 'stakes', help="limit worker to specified stakeholder addresses", multiple=True)
 @click.option('--wipe', help="Clear nucypher configs on existing nodes and start a fresh node with new keys.", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def up(general_config, staker_options, config_file, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, sentry_dsn, stakes, wipe, prometheus, namespace):
+def up(general_config, staker_options, config_file, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, stakes, wipe, prometheus, namespace, envvars):
     """Creates workers for all stakes owned by the user for the given network."""
 
     emitter = setup_emitter(general_config)
@@ -78,7 +78,8 @@ def up(general_config, staker_options, config_file, cloudprovider, aws_profile,
 
     config_file = config_file or StakeHolderConfiguration.default_filepath()
 
-    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file, remote_provider, nucypher_image, seed_network, sentry_dsn, aws_profile, prometheus, namespace=namespace, network=STAKEHOLDER.network)
+    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file, remote_provider,
+                                                          nucypher_image, seed_network, aws_profile, prometheus, namespace=namespace, network=STAKEHOLDER.network, envvars=envvars)
 
     if staker_addresses:
         config = deployer.create_nodes(staker_addresses)
@@ -93,13 +94,13 @@ def up(general_config, staker_options, config_file, cloudprovider, aws_profile,
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided, nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes. (default is nucypher/nucypher:latest)", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--count', help="Create this many nodes.", type=click.INT, default=1)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, sentry_dsn, prometheus, count, namespace, network):
+def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, prometheus, count, namespace, network, envvars):
     """Creates the required number of workers to be staked later under a namespace"""
 
     emitter = setup_emitter(general_config)
@@ -108,10 +109,8 @@ def create(general_config, cloudprovider, aws_profile, remote_provider, nucypher
         emitter.echo("Ansible is required to use this command. (Please run 'pip install ansible'.)", color="red")
         return
 
-    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, None, None, remote_provider, nucypher_image, seed_network, sentry_dsn, aws_profile, prometheus, namespace=namespace, network=network)
-    if not namespace:
-        emitter.echo("A namespace is required. Choose something to help differentiate between hosts, such as their specific purpose, or even just today's date.", color="red")
-        return
+    deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, None, None, remote_provider, nucypher_image, seed_network,
+                                                          aws_profile, prometheus, namespace=namespace, network=network, envvars=envvars)
 
     names = []
     i = 1
@@ -185,15 +184,15 @@ def add_for_stake(general_config, staker_options, config_file, staker_address, h
 @click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--wipe', help="Clear your nucypher config and start a fresh node with new keys", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
 @click.option('--gas-strategy', help="Which gas strategy? (glacial, slow, medium, fast)", type=click.STRING)
 @click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry_dsn, wipe, prometheus, namespace, network, gas_strategy, include_hosts):
+def deploy(general_config, remote_provider, nucypher_image, seed_network, wipe, prometheus, namespace, network, gas_strategy, include_hosts, envvars):
     """Deploys NuCypher on managed hosts."""
 
     emitter = setup_emitter(general_config)
@@ -202,7 +201,8 @@ def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry
         emitter.echo("Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)", color="red")
         return
 
-    deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, remote_provider, nucypher_image, seed_network, sentry_dsn, prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy)
+    deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, remote_provider, nucypher_image, seed_network,
+                                                      prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy, envvars=envvars)
 
     hostnames = deployer.config['instances'].keys()
     if include_hosts:
@@ -216,15 +216,15 @@ def deploy(general_config, remote_provider, nucypher_image, seed_network, sentry
 @click.option('--remote-provider', help="The blockchain provider for the remote node – e.g. an Infura endpoint address. If not provided nodes will run geth.", default=None)
 @click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
 @click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
-@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
 @click.option('--wipe', help="Clear your nucypher config and start a fresh node with new keys", default=False, is_flag=True)
 @click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
 @click.option('--namespace', help="Namespace for these operations. Used to address hosts and data locally and name hosts on cloud platforms.", type=click.STRING, default='local-stakeholders')
 @click.option('--network', help="The Nucypher network name these hosts will run on.", type=click.STRING, default='mainnet')
 @click.option('--gas-strategy', help="Which gas strategy? (glacial, slow, medium, fast)", type=click.STRING)
 @click.option('--include-host', 'include_hosts', help="specify hosts to update", multiple=True, type=click.STRING)
+@click.option('--env', '-e', 'envvars', help="environment variables (ENVVAR=VALUE)", multiple=True, type=click.STRING, default=[])
 @group_general_config
-def update(general_config, remote_provider, nucypher_image, seed_network, sentry_dsn, wipe, prometheus, namespace, network, gas_strategy, include_hosts):
+def update(general_config, remote_provider, nucypher_image, seed_network, wipe, prometheus, namespace, network, gas_strategy, include_hosts, envvars):
     """Updates existing installations of Nucypher on existing managed remote hosts."""
 
     emitter = setup_emitter(general_config)
@@ -235,11 +235,11 @@ def update(general_config, remote_provider, nucypher_image, seed_network, sentry
 
     deployer = CloudDeployers.get_deployer('generic')(
         emitter, None, None, remote_provider, nucypher_image,
-        seed_network, sentry_dsn,
-        prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy
+        seed_network,
+        prometheus=prometheus, namespace=namespace, network=network, gas_strategy=gas_strategy, envvars=envvars
     )
 
-    emitter.echo(f"found deploying {nucypher_image} on the following existing hosts:")
+    emitter.echo("updating the following existing hosts:")
 
     hostnames = deployer.config['instances'].keys()
     if include_hosts:
diff --git a/nucypher/utilities/clouddeploy.py b/nucypher/utilities/clouddeploy.py
index 102cf9917..e8dd2bf15 100644
--- a/nucypher/utilities/clouddeploy.py
+++ b/nucypher/utilities/clouddeploy.py
@@ -173,7 +173,6 @@ class BaseCloudNodeConfigurator:
                  blockchain_provider=None,
                  nucypher_image=None,
                  seed_network=False,
-                 sentry_dsn=None,
                  profile=None,
                  prometheus=False,
                  pre_config=False,
@@ -181,6 +180,7 @@ class BaseCloudNodeConfigurator:
                  namespace=None,
                  gas_strategy=None,
                  action=None,
+                 envvars=None,
                  ):
 
         self.emitter = emitter
@@ -188,6 +188,11 @@ class BaseCloudNodeConfigurator:
         self.network = network
         self.namespace = namespace or 'local-stakeholders'
         self.action = action
+        self.envvars = envvars or []
+        if self.envvars:
+            if not all(len(v.split('=', 1)) == 2 for v in self.envvars):
+                raise ValueError("Improperly specified environment variables: --env variables must be specified in pairs as `ENVVAR=VALUE`")
+            self.envvars = [v.split('=', 1) for v in self.envvars]
 
         self.config_filename = f'{self.network}-{self.namespace}.json'
 
@@ -229,13 +234,11 @@ class BaseCloudNodeConfigurator:
 
         self.host_level_overrides = {
             'blockchain_provider': blockchain_provider,
             'nucypher_image': nucypher_image,
-            'sentry_dsn': sentry_dsn,
             'gas_strategy': f'--gas-strategy {gas_strategy}' if gas_strategy else '',
         }
 
         self.config['blockchain_provider'] = blockchain_provider or self.config.get('blockchain_provider') or f'/root/.local/share/geth/.ethereum/{self.chain_name}/geth.ipc'  # the default for nodes that run their own geth container
         self.config['nucypher_image'] = nucypher_image or self.config.get('nucypher_image') or 'nucypher/nucypher:latest'
-        self.config['sentry_dsn'] = sentry_dsn or self.config.get('sentry_dsn')
         self.config['gas_strategy'] = f'--gas-strategy {gas_strategy}' if gas_strategy else self.config.get('gas-strategy', '')
         self.config['seed_network'] = seed_network if seed_network is not None else self.config.get('seed_network')
@@ -289,16 +292,48 @@ class BaseCloudNodeConfigurator:
     def inventory_path(self):
         return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml')
 
-    def generate_ansible_inventory(self, node_names, **kwargs):
+    def update_generate_inventory(self, node_names, **kwargs):
+
+        # filter out the nodes we will not be dealing with
+        nodes = {key: value for key, value in self.config['instances'].items() if key in node_names}
+
+        default_envvars = [
+            ('NUCYPHER_KEYRING_PASSWORD', self.config['keyringpassword']),
+            ('NUCYPHER_WORKER_ETH_PASSWORD', self.config['ethpassword']),
+        ]
+
+        input_envvars = [(k, v) for k, v in self.envvars]
+
+        # populate the specified environment variables as well as the
+        # defaults that are only used in the inventory
+        for key, node in nodes.items():
+            node_vars = nodes[key].get('runtime_envvars', {})
+            for k, v in input_envvars:
+                node_vars.update({k: v})
+            nodes[key]['runtime_envvars'] = node_vars
+
+            # we want to update the config with the specified envvars
+            # so they will persist in future invocations
+            self.config['instances'][key] = copy.deepcopy(nodes[key])
+
+        # we don't save the default_envvars to the config file,
+        # but we do render them into the inventory template,
+        # where they can be overridden on a per-node basis if previously specified
+        for key, node in nodes.items():
+            for k, v in default_envvars:
+                if k not in nodes[key]['runtime_envvars']:
+                    nodes[key]['runtime_envvars'][k] = v
 
         inventory_content = self._inventory_template.render(
             deployer=self,
-            nodes=[value for key, value in self.config['instances'].items() if key in node_names],
+            nodes=nodes.values(),
             extra=kwargs
         )
 
         with open(self.inventory_path, 'w') as outfile:
             outfile.write(inventory_content)
+
+        # now that everything rendered correctly, save how we got there.
         self._write_config()
 
         return self.inventory_path
@@ -361,7 +396,7 @@ class BaseCloudNodeConfigurator:
             self.config['seed_node'] = list(self.config['instances'].values())[0]['publicaddress']
             self._write_config()
 
-        self.generate_ansible_inventory(node_names, wipe_nucypher=wipe_nucypher)
+        self.update_generate_inventory(node_names, wipe_nucypher=wipe_nucypher)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -381,7 +416,6 @@ class BaseCloudNodeConfigurator:
 
         self.update_captured_instance_data(self.output_capture)
         self.give_helpful_hints(node_names, backup=True, playbook=playbook)
-
     def update_nucypher_on_existing_nodes(self, node_names):
 
         playbook = 'deploy/ansible/worker/update_remote_workers.yml'
@@ -402,7 +436,7 @@ class BaseCloudNodeConfigurator:
             self.config['seed_node'] = list(self.config['instances'].values())[0]['publicaddress']
             self._write_config()
 
-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -422,12 +456,11 @@ class BaseCloudNodeConfigurator:
 
         self.update_captured_instance_data(self.output_capture)
         self.give_helpful_hints(node_names, backup=True, playbook=playbook)
-
     def get_worker_status(self, node_names):
 
         playbook = 'deploy/ansible/worker/get_workers_status.yml'
 
-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -447,12 +480,11 @@ class BaseCloudNodeConfigurator:
 
         self.give_helpful_hints(node_names, playbook=playbook)
-
     def print_worker_logs(self, node_names):
 
         playbook = 'deploy/ansible/worker/get_worker_logs.yml'
 
-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -472,11 +504,10 @@ class BaseCloudNodeConfigurator:
 
         self.give_helpful_hints(node_names, playbook=playbook)
-
     def backup_remote_data(self, node_names):
 
         playbook = 'deploy/ansible/worker/backup_remote_workers.yml'
 
-        self.generate_ansible_inventory(node_names)
+        self.update_generate_inventory(node_names)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -499,7 +530,7 @@ class BaseCloudNodeConfigurator:
 
         playbook = 'deploy/ansible/worker/restore_ursula_from_backup.yml'
 
-        self.generate_ansible_inventory([target_host], restore_path=source_path)
+        self.update_generate_inventory([target_host], restore_path=source_path)
 
         loader = DataLoader()
         inventory = InventoryManager(loader=loader, sources=self.inventory_path)
@@ -585,7 +616,7 @@ class BaseCloudNodeConfigurator:
             )
             self.emitter.echo(f"\t{dep.format_ssh_cmd(host_data)}", color="yellow")
         if backup:
-            self.emitter.echo(" *** Local backups containing sensitive data have been created. ***", color="red")
+            self.emitter.echo(" *** Local backups containing sensitive data may have been created. ***", color="red")
             self.emitter.echo(f" Backup data can be found here: {self.config_dir}/remote_worker_backups/")
 
     def format_ssh_cmd(self, host_data):
diff --git a/nucypher/utilities/templates/cloud_deploy_ansible_inventory.mako b/nucypher/utilities/templates/cloud_deploy_ansible_inventory.mako
index 216138a07..acfe1e48f 100644
--- a/nucypher/utilities/templates/cloud_deploy_ansible_inventory.mako
+++ b/nucypher/utilities/templates/cloud_deploy_ansible_inventory.mako
@@ -14,8 +14,6 @@ all:
     etherscan_domain: ${deployer.chain_name}.etherscan.io
     ansible_python_interpreter: /usr/bin/python3
     ansible_connection: ssh
-    NUCYPHER_KEYRING_PASSWORD: ${deployer.config['keyringpassword']}
-    NUCYPHER_WORKER_ETH_PASSWORD: ${deployer.config['ethpassword']}
     nucypher_image: ${deployer.config['nucypher_image']}
     gas_strategy: ${deployer.config['gas_strategy']}
     blockchain_provider: ${deployer.config['blockchain_provider']}
@@ -32,17 +30,13 @@ all:
     SEED_NODE_URI:
     teacher_options: ""
 %endif
-    %if deployer.config.get('sentry_dsn'):
-    SENTRY_DSN: ${deployer.config['sentry_dsn']}
-    NUCYPHER_SENTRY_LOGS: yes
-    %endif
     wipe_nucypher_config: ${extra.get('wipe_nucypher', False)}
     deployer_config_path: ${deployer.config_dir}
     restore_path: ${extra.get('restore_path')}
   hosts:
 %for node in nodes:
     ${node['publicaddress']}:
-      host_nickname: ${node['host_nickname']}
+      host_nickname: "${node['host_nickname']}"
 %for attr in node['provider_deploy_attrs']:
       ${attr['key']}: ${attr['value']}
 %endfor
@@ -52,10 +46,11 @@ all:
       %if node.get('nucypher_image'):
       nucypher_image: ${node['nucypher_image']}
       %endif
-      %if node.get('sentry_dsn'):
-      sentry_dsn: ${node['sentry_dsn']}
-      %endif
       %if node.get('gas_strategy'):
       gas_strategy: ${node['gas_strategy']}
       %endif
+      runtime_envvars:
+      %for key, val in node['runtime_envvars'].items():
+        ${key}: "${val}"
+      %endfor
 %endfor

From 04adc9766cb9cdebb9e5ef9e525459224473e7a3 Mon Sep 17 00:00:00 2001
From: damon
Date: Fri, 11 Dec 2020 18:14:25 -0800
Subject: [PATCH 3/9] adds newsfragment

---
 newsfragments/2475.bugfix.rst | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 newsfragments/2475.bugfix.rst

diff --git a/newsfragments/2475.bugfix.rst b/newsfragments/2475.bugfix.rst
new file mode 100644
index 000000000..e532825b1
--- /dev/null
+++ b/newsfragments/2475.bugfix.rst
@@ -0,0 +1 @@
+Fixed bad CLI handling in several cloudworkers commands; improved envvar handling.
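
A minimal sketch of the per-node envvar semantics that PATCH 2/9 gives
update_generate_inventory() above. The helper below is illustrative only, not
part of clouddeploy.py; only its behavior mirrors the diff: CLI-supplied
`--env` pairs are persisted per node and override earlier values, while the
keyring and worker-ETH password defaults are rendered into the inventory only
when a node has not already pinned them, and are never written back to disk.

    # illustrative helper, not a real function in the codebase
    NUCYPHER_ENVVAR_KEYRING_PASSWORD = 'NUCYPHER_KEYRING_PASSWORD'
    NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD = 'NUCYPHER_WORKER_ETH_PASSWORD'

    def merge_runtime_envvars(node, cli_envvars, keyring_password, eth_password):
        # start from whatever was persisted for this node by earlier invocations
        merged = dict(node.get('runtime_envvars', {}))
        # CLI-supplied ENVVAR=VALUE pairs always win and are persisted;
        # split on the first '=' so values may themselves contain '='
        for pair in cli_envvars:
            key, value = pair.split('=', 1)
            merged[key] = value
        persisted = dict(merged)  # what gets written back to the namespace config
        # password defaults are rendered into the inventory only, never saved,
        # and never override a value the node already carries
        merged.setdefault(NUCYPHER_ENVVAR_KEYRING_PASSWORD, keyring_password)
        merged.setdefault(NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD, eth_password)
        return persisted, merged

    # example: a node that already pinned its keyring password keeps it
    persisted, rendered = merge_runtime_envvars(
        {'runtime_envvars': {'NUCYPHER_KEYRING_PASSWORD': 'pinned'}},
        ['SENTRY_DSN=https://example@sentry.io/1'],
        'default-keyring-pw', 'default-eth-pw')
    assert rendered['NUCYPHER_KEYRING_PASSWORD'] == 'pinned'
    assert 'NUCYPHER_WORKER_ETH_PASSWORD' not in persisted

Persisting only the user-supplied pairs keeps secrets like the keyring
password out of the on-disk namespace config while still supplying them to
each rendered Ansible inventory.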
From 02eb38d5a807f4201b822de055c337fc0d45fb8d Mon Sep 17 00:00:00 2001
From: damon
Date: Sat, 12 Dec 2020 17:34:13 -0800
Subject: [PATCH 4/9] make ansible playbooks root relative

---
 nucypher/utilities/clouddeploy.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/nucypher/utilities/clouddeploy.py b/nucypher/utilities/clouddeploy.py
index e8dd2bf15..f995baea4 100644
--- a/nucypher/utilities/clouddeploy.py
+++ b/nucypher/utilities/clouddeploy.py
@@ -36,7 +36,7 @@ from ansible.executor.playbook_executor import PlaybookExecutor
 from ansible import context as ansible_context
 from ansible.module_utils.common.collections import ImmutableDict
 
-from nucypher.config.constants import DEFAULT_CONFIG_ROOT
+from nucypher.config.constants import DEFAULT_CONFIG_ROOT, DEPLOY_DIR
 from nucypher.blockchain.eth.clients import PUBLIC_CHAINS
 from nucypher.blockchain.eth.networks import NetworksInventory
 
@@ -373,7 +373,7 @@ class BaseCloudNodeConfigurator:
 
     def deploy_nucypher_on_existing_nodes(self, node_names, wipe_nucypher=False):
 
-        playbook = 'deploy/ansible/worker/setup_remote_workers.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/setup_remote_workers.yml')
 
         # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -418,7 +418,7 @@ class BaseCloudNodeConfigurator:
 
     def update_nucypher_on_existing_nodes(self, node_names):
 
-        playbook = 'deploy/ansible/worker/update_remote_workers.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/update_remote_workers.yml')
 
         # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -458,7 +458,7 @@ class BaseCloudNodeConfigurator:
 
     def get_worker_status(self, node_names):
 
-        playbook = 'deploy/ansible/worker/get_workers_status.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/get_workers_status.yml')
 
         self.update_generate_inventory(node_names)
 
@@ -482,7 +482,7 @@ class BaseCloudNodeConfigurator:
 
     def print_worker_logs(self, node_names):
 
-        playbook = 'deploy/ansible/worker/get_worker_logs.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/get_worker_logs.yml')
 
         self.update_generate_inventory(node_names)
 
@@ -506,7 +506,7 @@ class BaseCloudNodeConfigurator:
 
     def backup_remote_data(self, node_names):
 
-        playbook = 'deploy/ansible/worker/backup_remote_workers.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/backup_remote_workers.yml')
 
         self.update_generate_inventory(node_names)
 
         loader = DataLoader()
@@ -528,7 +528,7 @@ class BaseCloudNodeConfigurator:
 
     def restore_from_backup(self, target_host, source_path):
 
-        playbook = 'deploy/ansible/worker/restore_ursula_from_backup.yml'
+        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/restore_ursula_from_backup.yml')
 
         self.update_generate_inventory([target_host], restore_path=source_path)

From 92ea9846f0b36f9011d048211cd01969b2f9308a Mon Sep 17 00:00:00 2001
From: damon
Date: Sat, 12 Dec 2020 17:52:14 -0800
Subject: [PATCH 5/9] raise exception on conditions resulting in empty inventory

---
 nucypher/utilities/clouddeploy.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nucypher/utilities/clouddeploy.py b/nucypher/utilities/clouddeploy.py
index f995baea4..da4a93878 100644
--- a/nucypher/utilities/clouddeploy.py
+++ b/nucypher/utilities/clouddeploy.py
@@ -296,6 +296,8 @@ class BaseCloudNodeConfigurator:
 
         # filter out the nodes we will not be dealing with
         nodes = {key: value for key, value in self.config['instances'].items() if key in node_names}
+        if not nodes:
+            raise KeyError(f"No hosts matched the supplied names: {node_names}. Try `nucypher cloudworkers list-hosts`")

From 560d41ac0f3b6c61c37db27afb8fd7dab0c011a8 Mon Sep 17 00:00:00 2001
From: damon
Date: Sat, 12 Dec 2020 17:55:15 -0800
Subject: [PATCH 6/9] cloudworkers add: use supplied nickname as-is

---
 nucypher/cli/commands/cloudworkers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nucypher/cli/commands/cloudworkers.py b/nucypher/cli/commands/cloudworkers.py
index 1ca8a26fe..74433bfeb 100644
--- a/nucypher/cli/commands/cloudworkers.py
+++ b/nucypher/cli/commands/cloudworkers.py
@@ -139,7 +139,7 @@ def add(general_config, host_address, login_name, key_path, ssh_port, host_nickn
     """Adds an existing node to the local config for future management."""
 
     emitter = setup_emitter(general_config)
-    name = f'{namespace}-{network}-{host_nickname}'
+    name = host_nickname
 
     deployer = CloudDeployers.get_deployer('generic')(emitter, None, None, namespace=namespace, network=network, action='add')
     config = deployer.create_nodes([name], host_address, login_name, key_path, ssh_port)

From f7c3d3527b02bf758d56105cbf1282b4ab9e9324 Mon Sep 17 00:00:00 2001
From: damon
Date: Sun, 20 Dec 2020 16:21:48 -0800
Subject: [PATCH 7/9] replace os.path.join calls with pathlib Path.joinpath

---
 nucypher/utilities/clouddeploy.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/nucypher/utilities/clouddeploy.py b/nucypher/utilities/clouddeploy.py
index da4a93878..1a9c42b46 100644
--- a/nucypher/utilities/clouddeploy.py
+++ b/nucypher/utilities/clouddeploy.py
@@ -17,6 +17,7 @@
 
 import copy
 import os
+from pathlib import Path
 import re
 import json
 import maya
@@ -265,7 +266,7 @@ class BaseCloudNodeConfigurator:
 
     @property
     def network_config_path(self):
-        return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, self.network)
+        return Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, self.network)
 
     @property
     def _provider_deploy_attrs(self):
@@ -290,7 +291,7 @@ class BaseCloudNodeConfigurator:
 
     @property
     def inventory_path(self):
-        return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml')
+        return str(Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.ansible_inventory.yml'))
 
     def update_generate_inventory(self, node_names, **kwargs):
 
@@ -375,7 +376,7 @@ class BaseCloudNodeConfigurator:
 
     def deploy_nucypher_on_existing_nodes(self, node_names, wipe_nucypher=False):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/setup_remote_workers.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/setup_remote_workers.yml')
 
        # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -420,7 +421,7 @@ class BaseCloudNodeConfigurator:
 
     def update_nucypher_on_existing_nodes(self, node_names):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/update_remote_workers.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/update_remote_workers.yml')
 
         # first update any specified input in our node config
         for k, input_specified_value in self.host_level_overrides.items():
@@ -460,7 +461,7 @@ class BaseCloudNodeConfigurator:
 
     def get_worker_status(self, node_names):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/get_workers_status.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/get_workers_status.yml')
 
         self.update_generate_inventory(node_names)
 
@@ -484,7 +485,7 @@ class BaseCloudNodeConfigurator:
 
     def print_worker_logs(self, node_names):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/get_worker_logs.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/get_worker_logs.yml')
 
         self.update_generate_inventory(node_names)
 
@@ -508,7 +509,7 @@ class BaseCloudNodeConfigurator:
 
     def backup_remote_data(self, node_names):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/backup_remote_workers.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/backup_remote_workers.yml')
 
         self.update_generate_inventory(node_names)
         loader = DataLoader()
@@ -530,7 +531,7 @@ class BaseCloudNodeConfigurator:
 
     def restore_from_backup(self, target_host, source_path):
 
-        playbook = os.path.join(DEPLOY_DIR, 'ansible/worker/restore_ursula_from_backup.yml')
+        playbook = Path(DEPLOY_DIR).joinpath('ansible/worker/restore_ursula_from_backup.yml')
 
         self.update_generate_inventory([target_host], restore_path=source_path)
 
@@ -816,7 +817,7 @@ class AWSNodeConfigurator(BaseCloudNodeConfigurator):
 
     def _create_keypair(self):
         new_keypair_data = self.ec2Client.create_key_pair(KeyName=f'{self.namespace_network}')
-        outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
+        outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
         os.makedirs(os.path.dirname(outpath), exist_ok=True)
         with open(outpath, 'w') as outfile:
             outfile.write(new_keypair_data['KeyMaterial'])
@@ -829,7 +830,7 @@ class AWSNodeConfigurator(BaseCloudNodeConfigurator):
         # only use self.namespace here to avoid accidental deletions of pre-existing keypairs
         deleted_keypair_data = self.ec2Client.delete_key_pair(KeyName=f'{self.namespace_network}')
         if deleted_keypair_data['HTTPStatusCode'] == 200:
-            outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
+            outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, f'{self.namespace_network}.awskeypair')
             os.remove(outpath)
             self.emitter.echo(f"keypair at {outpath}, was deleted", color='yellow')

From 479b9793b98cf4d07594e30010fa80466b259a54 Mon Sep 17 00:00:00 2001
From: damon
Date: Sun, 20 Dec 2020 16:56:31 -0800
Subject: [PATCH 8/9] cloudworkers: use constants for envvar names

---
 nucypher/utilities/clouddeploy.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nucypher/utilities/clouddeploy.py b/nucypher/utilities/clouddeploy.py
index 1a9c42b46..98d27ef5d 100644
--- a/nucypher/utilities/clouddeploy.py
+++ b/nucypher/utilities/clouddeploy.py
@@ -37,7 +37,7 @@ from ansible.executor.playbook_executor import PlaybookExecutor
 from ansible import context as ansible_context
 from ansible.module_utils.common.collections import ImmutableDict
 
-from nucypher.config.constants import DEFAULT_CONFIG_ROOT, DEPLOY_DIR
+from nucypher.config.constants import DEFAULT_CONFIG_ROOT, DEPLOY_DIR, NUCYPHER_ENVVAR_KEYRING_PASSWORD, NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD
 from nucypher.blockchain.eth.clients import PUBLIC_CHAINS
 from nucypher.blockchain.eth.networks import NetworksInventory
 
@@ -301,8 +301,8 @@ class BaseCloudNodeConfigurator:
             raise KeyError(f"No hosts matched the supplied names: {node_names}. Try `nucypher cloudworkers list-hosts`")
 
         default_envvars = [
-            ('NUCYPHER_KEYRING_PASSWORD', self.config['keyringpassword']),
-            ('NUCYPHER_WORKER_ETH_PASSWORD', self.config['ethpassword']),
+            (NUCYPHER_ENVVAR_KEYRING_PASSWORD, self.config['keyringpassword']),
+            (NUCYPHER_ENVVAR_WORKER_ETH_PASSWORD, self.config['ethpassword']),
         ]

From cda93e72aad8c4f3f819966457a7c8a192145b26 Mon Sep 17 00:00:00 2001
From: damon
Date: Sun, 20 Dec 2020 16:57:14 -0800
Subject: [PATCH 9/9] cloudworkers: improved docs

---
 .../guides/network_node/nucypher_host_management_cli.rst | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/docs/source/guides/network_node/nucypher_host_management_cli.rst b/docs/source/guides/network_node/nucypher_host_management_cli.rst
index 2a3c92f3b..7bb81def6 100644
--- a/docs/source/guides/network_node/nucypher_host_management_cli.rst
+++ b/docs/source/guides/network_node/nucypher_host_management_cli.rst
@@ -60,10 +60,11 @@ Some examples:
     # ---------------------------------------------------------------------------------------------------------------------------
     #
     # NOTE: if no --remote-provider is specified, geth will be run on the host and a larger instance with more RAM will be used.
-    # this will probably cost more and require some time to sync.
+    # this will probably cost more and require some time to sync. (A remote provider such as Alchemy or Infura is highly recommended.)
     #
     # ---------------------------------------------------------------------------------------------------------------------------
     #
     # on AWS
+    # configure your local aws cli with named profiles https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html
     $ nucypher cloudworkers up --cloudprovider aws --aws-profile my-aws-profile --remote-provider http://mainnet.infura..3epifj3rfioj
     # add your ubuntu machine at the office to an existing locally managed stake
@@ -86,6 +87,9 @@ Some examples:
     # deploy nucypher on all your managed hosts
     $ nucypher cloudworkers deploy --remote-provider http://mainnet.infura..3epifj3rfioj
 
+    # set some environment variables to configure Ursula workers on all your hosts
+    $ nucypher cloudworkers deploy -e NUCYPHER_MAX_GAS_PRICE_GWEI=35 -e DONT_PERFORM_WORK_ON_SUNDAY=true
+
     # print the current status of all workers across all namespaces (in bash)
     $ for ns in $(nucypher cloudworkers list-namespaces); do nucypher cloudworkers status --namespace $ns; done
     > local nickname: Project11-mainnet-2
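
With the --sentry-dsn flag removed in favor of generic envvars (PATCH 2/9),
Sentry reporting can be wired up through the same --env mechanism. A sketch in
the docs' own style; the variable names are assumptions based on the old
inventory template, not a confirmed contract of the docker image:

    # enable Sentry reporting via runtime envvars (variable names are assumptions)
    $ nucypher cloudworkers deploy -e NUCYPHER_SENTRY_DSN=https://abc123@sentry.io/12345 -e NUCYPHER_SENTRY_LOGS=yes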