mirror of https://github.com/nucypher/nucypher.git
Merge pull request #2080 from vepkenez/deploy-foo
Automated cloud deployment for all!
commit 372b605f35
@@ -19,13 +19,14 @@
    - name: Print Result
      debug:
        msg:
          - "{{status_data.json.nickname}}"
          - "nucypher version: {{status_data.json.version}}"
          - "staker address: {{status_data.json.staker_address}}"
          - "worker address: {{status_data.json.worker_address}}"
          - "rest url: https://{{status_data.json.rest_url}}"
          - "missing commitments: {{status_data.json.missing_commitments}}"
          - "last committed period: {{status_data.json.last_committed_period}}"
          - "balances:"
          - "  ETH: {{status_data.json.balances.eth}}"
          - "  NU: {{status_data.json.balances.nu}}"
          "{{status_data.json.nickname}}\n
          nucypher version: {{status_data.json.version}}\n
          staker address: {{status_data.json.staker_address}}\n
          worker address: {{status_data.json.worker_address}}\n
          rest url: https://{{status_data.json.rest_url}}\n
          missing commitments: {{status_data.json.missing_commitments}}\n
          last committed period: {{status_data.json.last_committed_period}}\n
          balances:\n
              ETH: {{status_data.json.balances.eth}}\n
              NU: {{status_data.json.balances.nu}}\n
          {{inventory_hostname}}:worker address:{{status_data.json.worker_address}}\n"

@@ -12,11 +12,31 @@
        recurse: yes
        owner: nucypher

    - name: Stop any running Ursulas
      become: yes
      become_user: nucypher
      docker_container:
        name: ursula
        state: stopped
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"

    - name: Keep disk space clean by pruning unneeded docker debris
      become: yes
      docker_prune:
        containers: yes
        images: yes
        images_filters:
          dangling: false
        networks: yes
        volumes: yes
        builder_cache: yes

    - name: "pull {{ nucypher_image | default('nucypher/nucypher:latest') }}"
      become: yes
      docker_image:
        name: nucypher/nucypher
        name: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        source: pull
        force_source: yes

    - name: "check if /home/nucypher/nucypher/ursula.json exists"
      become: yes

@@ -25,8 +45,22 @@
        path: /home/nucypher/nucypher/ursula.json
      register: ursula_check

    - name: find keystore file
      find:
        paths: "{{geth_dir}}keystore"
      register: keystore_output

    - name: store signer options
      set_fact:
        signer_options: "--signer keystore://{{nucypher_container_geth_datadir}}/keystore/{{keystore_output.files[0].path | basename}}"
      when: node_is_decentralized is undefined or not node_is_decentralized and ursula_check.stat.exists == False

    - name: store empty signer options
      set_fact:
        signer_options: ""
      when: node_is_decentralized is not undefined and node_is_decentralized and ursula_check.stat.exists == False

    - name: Find my public ip
      when: ursula_check.stat.exists == False
      uri:
        url: http://ifconfig.me/ip
        return_content: yes

@@ -36,6 +70,6 @@
      become: yes
      become_user: nucypher
      when: ursula_check.stat.exists == False
      command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYRING_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula init --provider /root/.local/share/geth/.ethereum/goerli/geth.ipc --poa --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}}"
      command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYRING_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula init --provider {{ blockchain_provider }} --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}} {{signer_options}}"
      environment:
        NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"

@@ -3,6 +3,13 @@
  remote_user: "{{default_user}}"
  gather_facts: no
  tasks:
    - name: "remove existing nucypher config data"
      become: yes
      file:
        path: /home/nucypher/nucypher/
        state: absent
      when: wipe_nucypher_config is not undefined and wipe_nucypher_config

    - name: "create geth directory"
      become: yes
      file:

@@ -56,11 +63,16 @@
        owner: nucypher
      when: WORKER_ACCT_KEYSTORE_PATH is undefined and not account_file.stat.exists

    - name: "echo nucypher_container_geth_datadir"
      debug:
        verbosity: 0
        var: nucypher_container_geth_datadir

    # create a local geth account if one doesn't exist
    - name: "create new account and capture the address"
      become: yes
      shell:
        cmd: 'docker run -v /home/nucypher/geth:/root ethereum/client-go:latest account new --password /root/password.txt --datadir {{geth_container_datadir}} | grep "Public address of the key:" | cut -d":" -f2- | xargs'
        cmd: 'docker run -v /home/nucypher/geth:/root ethereum/client-go:latest account new --password /root/password.txt --datadir {{geth_container_geth_datadir}} | grep "Public address of the key:" | cut -d":" -f2- | xargs'
      register: new_geth_account_checksum
      when: not account_file.stat.exists

@@ -94,16 +106,8 @@
    - name: "echo worker address"
      debug:
        verbosity: 0
        msg: "{{ active_account.stdout }}"
        msg: "{{inventory_hostname}}:worker address:{{ active_account.stdout }}"

    - name: store worker address
      set_fact:
        worker_address: "{{active_account.stdout}}"

    - name: "Confirm worker funds"
      pause:
        prompt: |
          "Confirm ether funding for worker: https://{{etherscan_domain}}/address/{{hostvars[item]['worker_address']}}"
          ------------------------------------------------------------------------------------------------------------------
      with_items:
        - "{{ ansible_play_batch }}"

@@ -1,4 +1,4 @@
- name: "Sync/Run Geth"
- name: "Sync/Run Geth if we are running a Decentralized node"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

@@ -25,7 +25,7 @@
      register: geth_sync_status
      when: geth_running.stat.exists == False and restarting_geth is undefined

    - name: 'check if geth is finished syncing...'
    - name: 'checking every few seconds if geth is finished syncing... (this will continue even if you kill this process)'
      become: yes
      until: job_result.finished
      retries: 10000

@@ -3,22 +3,116 @@
  remote_user: "{{default_user}}"
  gather_facts: no
  tasks:
    - name: "Run Staked Ursula"

    - name: find keystore file
      find:
        paths: "{{geth_dir}}keystore"
      register: keystore_output

    - name: store signer options
      set_fact:
        signer_options: "--signer keystore://{{nucypher_container_geth_datadir}}/keystore/{{keystore_output.files[0].path | basename}}"
      when: node_is_decentralized is undefined or not node_is_decentralized

    - name: store empty signer options
      set_fact:
        signer_options: ""
      when: node_is_decentralized is not undefined and node_is_decentralized

    - name: "remove known nodes"
      become: yes
      file:
        path: /home/nucypher/nucypher/known_nodes/
        state: absent

    - name: "get account address from file"
      become: yes
      command: 'cat {{geth_dir}}account.txt'
      register: active_account

    - name: "ensure known nodes certificates directory"
      become: yes
      file:
        path: /home/nucypher/nucypher/known_nodes/certificates
        state: directory

    - name: "ensure known nodes directory"
      become: yes
      file:
        path: /home/nucypher/nucypher/known_nodes/metadata
        state: directory

    - name: Find my public ip
      uri:
        url: http://ifconfig.me/ip
        return_content: yes
      register: ip_response

    - name: "update Ursula worker config"
      become: yes
      become_user: nucypher
      command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYRING_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula config --provider {{ blockchain_provider }} --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}} {{signer_options}} --config-file /root/.local/share/nucypher/ursula.json"
      environment:
        NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"

    - name: "Run Staked Ursula (seed node)"
      become: yes
      become_user: nucypher
      when: SEED_NODE_URI is not undefined and inventory_hostname == SEED_NODE_URI
      docker_container:
        recreate: yes
        name: ursula
        state: started
        pull: yes
        log_driver: json-file
        log_options:
          max-size: 10m
          max-file: "5"
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        restart_policy: "unless-stopped"
        command: "nucypher ursula run --teacher gemini.nucypher.network:9151 --poa {{nucypher_ursula_run_options | default('')}}"
        command: "nucypher ursula run {{nucypher_ursula_run_options | default('')}} {{signer_options}} --lonely {{prometheus | default('')}}"
        volumes:
          - /home/nucypher:/root/.local/share/
        ports:
          - "9151:9151"
          - "9101:9101"
        env:
          NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
          NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
          NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
          NUCYPHER_SENTRY_LOGS: "1"

    - name: "wait a few seconds for the seed node to become available"
      when: SEED_NODE_URI is not undefined
      pause:
        seconds: 15

    - name: "Run Staked Ursula (non-seed)"
      become: yes
      become_user: nucypher
      when: SEED_NODE_URI is undefined or inventory_hostname != SEED_NODE_URI
      docker_container:
        recreate: yes
        name: ursula
        state: started
        pull: yes
        log_driver: json-file
        log_options:
          max-size: 10m
          max-file: "5"
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        restart_policy: "unless-stopped"
        command: "nucypher ursula run {{nucypher_ursula_run_options | default('')}} {{signer_options}} --disable-availability-check --teacher {{SEED_NODE_URI}} {{prometheus | default('')}}"
        volumes:
          - /home/nucypher:/root/.local/share/
        ports:
          - "9151:9151"
          - "9101:9101"
        env:
          NUCYPHER_KEYRING_PASSWORD: "{{NUCYPHER_KEYRING_PASSWORD}}"
          NUCYPHER_WORKER_ETH_PASSWORD: "{{NUCYPHER_WORKER_ETH_PASSWORD}}"
          NUCYPHER_SENTRY_DSN: "{{SENTRY_DSN | default('')}}"
          NUCYPHER_SENTRY_LOGS: "1"

    - name: "wait a few seconds for Ursula to start up"
      pause:

@@ -0,0 +1,20 @@
- name: "Stop Geth and Ursula Containers if they are running"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no
  tasks:
    - name: Stop Ursula
      become: yes
      become_user: nucypher
      docker_container:
        name: ursula
        state: stopped
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"

    - set_fact: restarting_geth=True
    - name: Stop Geth
      become: yes
      docker_container:
        name: geth
        state: stopped
      when: node_is_decentralized is not undefined and node_is_decentralized

@@ -4,6 +4,14 @@
  gather_facts: no
  tasks:

    - name: Keep disk space clean by pruning unneeded docker debris
      become: yes
      docker_prune:
        containers: yes
        images: yes
        networks: yes
        builder_cache: yes

    - name: "pull {{ nucypher_image | default('nucypher/nucypher:latest') }}"
      become: yes
      docker_image:

@@ -11,19 +19,7 @@
        source: pull
        force_source: yes

    - name: Stop Ursula
      become: yes
      become_user: nucypher
      docker_container:
        name: ursula
        state: stopped

    - set_fact: restarting_geth=True
    - name: Stop Geth
      become: yes
      docker_container:
        name: geth
        state: stopped

- import_playbook: stop_containers.yml
- import_playbook: run_geth.yml
  when: node_is_decentralized is not undefined and node_is_decentralized
- import_playbook: run_ursula.yml

@@ -6,6 +6,7 @@
- import_playbook: include/setup_docker.yml
- import_playbook: include/init_worker.yml
- import_playbook: include/run_geth.yml
  when: node_is_decentralized is not undefined and node_is_decentralized
- import_playbook: include/init_ursula.yml
- import_playbook: include/run_ursula.yml
- import_playbook: include/check_running_ursula.yml

@@ -0,0 +1,5 @@
- name: "Update Remote Workers to latest NuCypher, Geth"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"

- import_playbook: include/stop_containers.yml

@@ -20,6 +20,7 @@ comfort level:

   staking_guide
   ursula_configuration_guide
   nucypher_host_management_cli

Here is a simple heuristic to help decide on a course of action:

@@ -0,0 +1,60 @@
.. _managing-cloud-workers:

===================================================
Nucypher CLI tools for running and managing workers
===================================================

Cloudworkers CLI
----------------

NuCypher maintains simple tools, built on open source tooling such as Ansible, that make it easy
to keep your NuCypher Ursula nodes running and up to date.

.. code:: bash

    (nucypher)$ nucypher cloudworkers ACTION [OPTIONS]

**Cloudworkers Command Actions**

+----------------------+-------------------------------------------------------------------------------+
| Action               | Description                                                                   |
+======================+===============================================================================+
| ``up``               | Create hosts, configure and deploy an Ursula on AWS or Digital Ocean         |
+----------------------+-------------------------------------------------------------------------------+
| ``add``              | Add an existing host to be managed by cloudworkers CLI tools                 |
+----------------------+-------------------------------------------------------------------------------+
| ``deploy``           | Update and deploy Ursula on existing hosts.                                   |
+----------------------+-------------------------------------------------------------------------------+
| ``destroy``          | Shut down and clean up resources deployed on AWS or Digital Ocean            |
+----------------------+-------------------------------------------------------------------------------+
| ``status``           | Query the status of all managed hosts.                                        |
+----------------------+-------------------------------------------------------------------------------+


Some examples:

.. code:: bash

    # You have created stakes already. Now run an Ursula for each one

    # on Digital Ocean
    $ export DIGITALOCEAN_ACCESS_TOKEN=<your access token>
    $ export DIGITALOCEAN_REGION=<a digitalocean availability region>
    $ nucypher cloudworkers up --cloudprovider digitalocean --remote-provider http://mainnet.infura..3epifj3rfioj

    # --------------------------------------------------------------------------------------------------------------------------- #
    # NOTE: if no --remote-provider is specified, geth will be run on the host and a larger instance with more RAM will be used.
    # this will probably cost more and require some time to sync.
    # --------------------------------------------------------------------------------------------------------------------------- #

    # on AWS
    $ nucypher cloudworkers up --cloudprovider aws --aws-profile my-aws-profile --remote-provider http://mainnet.infura..3epifj3rfioj

    # add your ubuntu machine at the office
    $ nucypher cloudworkers add --staker-address 0x9a92354D3811938A1f35644825188cAe3103bA8e --host-address somebox.myoffice.net --login-name root --key-path ~/.ssh/id_rsa

    # deploy or update all your existing hosts to the latest code
    $ nucypher cloudworkers deploy --nucypher-image nucypher/nucypher:latest

    # change two of your existing hosts to use alchemy instead of infura as a delegated blockchain provider
    $ nucypher cloudworkers deploy --remote-provider wss://eth-mainnet.ws.alchemyapi.io/v2/aodfh298fh2398fh2398hf3924f... --include-stakeholder 0x9a92354D3811938A1f35644825188cAe3103bA8e --include-stakeholder 0x1Da644825188cAe3103bA8e92354D3811938A1f35
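
The ``cloudworkers`` commands keep their deployment state (the namespace, generated passwords, and one
entry per staker address for each host they created or were told about) in a JSON file under your
nucypher configuration root, named after the network. A minimal sketch for inspecting that state outside
the CLI follows; it assumes the default configuration root and a network named ``mainnet``, and is not
itself part of the CLI:

.. code:: python

    import json
    import os

    from nucypher.config.constants import DEFAULT_CONFIG_ROOT

    # cloudworkers state lives in <config root>/worker-configs/<network>.json
    config_path = os.path.join(DEFAULT_CONFIG_ROOT, 'worker-configs', 'mainnet.json')

    with open(config_path) as f:
        state = json.load(f)

    print(state['namespace'])  # e.g. "<network>-<date>"
    for staker_address, host in state.get('instances', {}).items():
        # each entry records the host's public address and the provider that created it
        print(staker_address, host['publicaddress'], host['provider'])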
@ -1815,11 +1815,13 @@ class StakeHolder(Staker):
|
|||
initial_address: str = None,
|
||||
checksum_addresses: set = None,
|
||||
signer: Signer = None,
|
||||
worker_data: dict = None,
|
||||
*args, **kwargs):
|
||||
|
||||
self.staking_interface_agent = None
|
||||
|
||||
super().__init__(is_me=is_me, *args, **kwargs)
|
||||
self.worker_data = worker_data
|
||||
self.log = Logger("stakeholder")
|
||||
|
||||
# Wallet
|
||||
|
|
|
@ -0,0 +1,206 @@
|
|||
"""
|
||||
This file is part of nucypher.
|
||||
|
||||
nucypher is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
nucypher is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
import click
|
||||
|
||||
try:
|
||||
from nucypher.utilities.clouddeploy import CloudDeployers
|
||||
except ImportError:
|
||||
CloudDeployers = None
|
||||
from nucypher.cli.utils import setup_emitter
|
||||
from nucypher.config.characters import StakeHolderConfiguration
|
||||
from nucypher.cli.commands.stake import group_staker_options, option_config_file, group_general_config
|
||||
|
||||
|
||||
def filter_staker_addresses(stakers, stakes):
|
||||
|
||||
staker_addresses = set()
|
||||
for staker in stakers:
|
||||
|
||||
for stake in staker.stakes:
|
||||
if stakes:
|
||||
if stake.staker_address not in stakes:
|
||||
continue
|
||||
staker_addresses.add(stake.staker_address)
|
||||
return staker_addresses
|
||||
|
||||
|
||||
@click.group()
|
||||
def cloudworkers():
|
||||
"""Manage stakes and other staker-related operations."""
|
||||
|
||||
@cloudworkers.command('up')
|
||||
@group_staker_options
|
||||
@option_config_file
|
||||
@click.option('--cloudprovider', help="aws or digitalocean", default='aws')
|
||||
@click.option('--aws-profile', help="The cloud provider account profile you'd like to use (an aws profile)", default=None)
|
||||
@click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided, nodes will run geth.", default=None)
|
||||
@click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes. (default is nucypher/nucypher:latest)", default=None)
|
||||
@click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
|
||||
@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
|
||||
@click.option('--include-stakeholder', 'stakes', help="limit worker to specified stakeholder addresses", multiple=True)
|
||||
@click.option('--wipe', help="Clear nucypher configs on existing nodes and start a fresh node with new keys.", default=False, is_flag=True)
|
||||
@click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
|
||||
@group_general_config
|
||||
def up(general_config, staker_options, config_file, cloudprovider, aws_profile, remote_provider, nucypher_image, seed_network, sentry_dsn, stakes, wipe, prometheus):
|
||||
"""Creates workers for all stakes owned by the user for the given network."""
|
||||
|
||||
emitter = setup_emitter(general_config)
|
||||
|
||||
if not CloudDeployers:
|
||||
emitter.echo("Ansible is required to use this command. (Please run 'pip install ansible'.)", color="red")
|
||||
return
|
||||
STAKEHOLDER = staker_options.create_character(emitter, config_file)
|
||||
|
||||
stakers = STAKEHOLDER.get_stakers()
|
||||
if not stakers:
|
||||
emitter.echo("No staking accounts found.")
|
||||
return
|
||||
|
||||
staker_addresses = filter_staker_addresses(stakers, stakes)
|
||||
|
||||
config_file = config_file or StakeHolderConfiguration.default_filepath()
|
||||
|
||||
deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file, remote_provider, nucypher_image, seed_network, sentry_dsn, aws_profile, prometheus)
|
||||
config = deployer.create_nodes_for_stakers(staker_addresses)
|
||||
|
||||
if config.get('instances') and len(config.get('instances')) >= len(staker_addresses):
|
||||
emitter.echo('Nodes exist for all requested stakes', color="yellow")
|
||||
deployer.deploy_nucypher_on_existing_nodes(staker_addresses, wipe_nucypher=wipe)
|
||||
|
||||
|
||||
@cloudworkers.command('add')
|
||||
@group_staker_options
|
||||
@option_config_file
|
||||
@click.option('--staker-address', help="The staker account address for whom you are adding a worker host.", required=True)
|
||||
@click.option('--host-address', help="The IP address or Hostname of the host you are adding.", required=True)
|
||||
@click.option('--login-name', help="The username of a user with root privileges we can ssh as on the host.", required=True)
|
||||
@click.option('--key-path', help="The path to a keypair we will need to ssh into this host", default="~/.ssh/id_rsa.pub")
|
||||
@click.option('--ssh-port', help="The port this host's ssh daemon is listening on", default=22)
|
||||
@group_general_config
|
||||
def add(general_config, staker_options, config_file, staker_address, host_address, login_name, key_path, ssh_port):
|
||||
"""Creates workers for all stakes owned by the user for the given network."""
|
||||
|
||||
emitter = setup_emitter(general_config)
|
||||
|
||||
STAKEHOLDER = staker_options.create_character(emitter, config_file)
|
||||
|
||||
stakers = STAKEHOLDER.get_stakers()
|
||||
if not stakers:
|
||||
emitter.echo("No staking accounts found.")
|
||||
return
|
||||
|
||||
staker_addresses = filter_staker_addresses(stakers, [staker_address])
|
||||
if not staker_addresses:
|
||||
emitter.echo(f"Could not find staker address: {staker_address} among your stakes. (try `nucypher stake --list`)", color="red")
|
||||
return
|
||||
|
||||
config_file = config_file or StakeHolderConfiguration.default_filepath()
|
||||
|
||||
deployer = CloudDeployers.get_deployer('generic')(emitter, STAKEHOLDER, config_file)
|
||||
config = deployer.create_nodes_for_stakers(staker_addresses, host_address, login_name, key_path, ssh_port)
|
||||
|
||||
|
||||
|
||||
@cloudworkers.command('deploy')
|
||||
@group_staker_options
|
||||
@option_config_file
|
||||
@click.option('--remote-provider', help="The blockchain provider for the remote node, if not provided nodes will run geth.", default=None)
|
||||
@click.option('--nucypher-image', help="The docker image containing the nucypher code to run on the remote nodes.", default=None)
|
||||
@click.option('--seed-network', help="Do you want the 1st node to be --lonely and act as a seed node for this network", default=False, is_flag=True)
|
||||
@click.option('--sentry-dsn', help="a sentry dsn for these workers (https://sentry.io/)", default=None)
|
||||
@click.option('--include-stakeholder', 'stakes', help="limit worker to specified stakeholder addresses", multiple=True)
|
||||
@click.option('--wipe', help="Clear your nucypher config and start a fresh node with new keys.", default=False, is_flag=True)
|
||||
@click.option('--prometheus', help="Run Prometheus on workers.", default=False, is_flag=True)
|
||||
@group_general_config
|
||||
def deploy(general_config, staker_options, config_file, remote_provider, nucypher_image, seed_network, sentry_dsn, stakes, wipe, prometheus):
|
||||
"""Deploys NuCypher on existing hardware."""
|
||||
|
||||
emitter = setup_emitter(general_config)
|
||||
|
||||
if not CloudDeployers:
|
||||
emitter.echo("Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)", color="red")
|
||||
return
|
||||
STAKEHOLDER = staker_options.create_character(emitter, config_file)
|
||||
|
||||
stakers = STAKEHOLDER.get_stakers()
|
||||
if not stakers:
|
||||
emitter.echo("No staking accounts found.")
|
||||
return
|
||||
|
||||
staker_addresses = filter_staker_addresses(stakers, stakes)
|
||||
|
||||
config_file = config_file or StakeHolderConfiguration.default_filepath()
|
||||
|
||||
deployer = CloudDeployers.get_deployer('generic')(emitter, STAKEHOLDER, config_file, remote_provider, nucypher_image, seed_network, sentry_dsn, prometheus=prometheus)
|
||||
|
||||
emitter.echo("found nodes for the following stakers:")
|
||||
for staker_address in staker_addresses:
|
||||
if deployer.config['instances'].get(staker_address):
|
||||
data = deployer.config['instances'].get(staker_address)
|
||||
emitter.echo(f'\t{staker_address}: {data["publicaddress"]}', color="yellow")
|
||||
deployer.deploy_nucypher_on_existing_nodes(staker_addresses, wipe_nucypher=wipe)
|
||||
|
||||
|
||||
@cloudworkers.command('destroy')
|
||||
@group_staker_options
|
||||
@option_config_file
|
||||
@click.option('--cloudprovider', help="aws or digitalocean")
|
||||
@click.option('--include-stakeholder', 'stakes', help="one or more stakeholder addresses to whom we should limit worker destruction", multiple=True)
|
||||
@group_general_config
|
||||
def destroy(general_config, staker_options, config_file, cloudprovider, stakes):
|
||||
"""Cleans up all previously created resources for the given netork for the cloud providern"""
|
||||
|
||||
emitter = setup_emitter(general_config)
|
||||
if not CloudDeployers:
|
||||
emitter.echo("Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)", color="red")
|
||||
return
|
||||
STAKEHOLDER = staker_options.create_character(emitter, config_file)
|
||||
|
||||
stakers = STAKEHOLDER.get_stakers()
|
||||
if not stakers:
|
||||
emitter.echo("No staking accounts found.")
|
||||
return
|
||||
|
||||
staker_addresses = filter_staker_addresses(stakers, stakes)
|
||||
|
||||
config_file = config_file or StakeHolderConfiguration.default_filepath()
|
||||
deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file)
|
||||
deployer.destroy_resources(staker_addresses=staker_addresses)
|
||||
|
||||
|
||||
@cloudworkers.command('status')
|
||||
@group_staker_options
|
||||
@option_config_file
|
||||
@click.option('--cloudprovider', help="aws or digitalocean", default='aws')
|
||||
@click.option('--include-stakeholder', 'stakes', help="only show nodes for included stakeholder addresses", multiple=True)
|
||||
@group_general_config
|
||||
def status(general_config, staker_options, config_file, cloudprovider, stakes):
|
||||
"""Displays worker status and updates worker data in stakeholder config"""
|
||||
|
||||
emitter = setup_emitter(general_config)
|
||||
if not CloudDeployers:
|
||||
emitter.echo("Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)", color="red")
|
||||
return
|
||||
STAKEHOLDER = staker_options.create_character(emitter, config_file)
|
||||
config_file = config_file or StakeHolderConfiguration.default_filepath()
|
||||
deployer = CloudDeployers.get_deployer(cloudprovider)(emitter, STAKEHOLDER, config_file)
|
||||
|
||||
stakers = STAKEHOLDER.get_stakers()
|
||||
staker_addresses = filter_staker_addresses(stakers, stakes)
|
||||
|
||||
deployer.get_worker_status(staker_addresses)
|
|
@ -17,7 +17,7 @@ along with nucypher. If not, see <https://www.gnu.org/licenses/>.
|
|||
|
||||
import click
|
||||
|
||||
from nucypher.cli.commands import alice, bob, dao, enrico, felix, multisig, stake, status, ursula, worklock
|
||||
from nucypher.cli.commands import alice, bob, dao, enrico, felix, multisig, stake, status, ursula, worklock, cloudworkers
|
||||
from nucypher.cli.painting.help import echo_version
|
||||
|
||||
|
||||
|
@ -65,7 +65,8 @@ ENTRY_POINTS = (
|
|||
status.status, # Network Status
|
||||
felix.felix, # Faucet
|
||||
multisig.multisig, # MultiSig operations
|
||||
worklock.worklock # WorkLock
|
||||
worklock.worklock, # WorkLock
|
||||
cloudworkers.cloudworkers  # Remote Worker node management
|
||||
)
|
||||
|
||||
for entry_point in ENTRY_POINTS:
|
||||
|
|
|
@ -45,7 +45,7 @@ def paint_all_stakes(emitter: StdoutEmitter,
|
|||
# TODO: Something with non-staking accounts?
|
||||
continue
|
||||
|
||||
paint_stakes(emitter=emitter, staker=staker, paint_unlocked=paint_unlocked)
|
||||
paint_stakes(emitter=emitter, staker=staker, paint_unlocked=paint_unlocked, stakeholder=stakeholder)
|
||||
total_stakers += 1
|
||||
|
||||
if not total_stakers:
|
||||
|
@ -55,7 +55,8 @@ def paint_all_stakes(emitter: StdoutEmitter,
|
|||
def paint_stakes(emitter: StdoutEmitter,
|
||||
staker: 'Staker',
|
||||
stakes: List[Stake] = None,
|
||||
paint_unlocked: bool = False) -> None:
|
||||
paint_unlocked: bool = False,
|
||||
stakeholder=None) -> None:
|
||||
|
||||
stakes = stakes or staker.sorted_stakes()
|
||||
|
||||
|
@ -85,6 +86,14 @@ def paint_stakes(emitter: StdoutEmitter,
|
|||
emitter.echo(f"Staker {staker.checksum_address} ════", bold=True, color='red' if missing else 'green')
|
||||
worker = staker.worker_address if staker.worker_address != NULL_ADDRESS else 'not bonded'
|
||||
emitter.echo(f"Worker {worker} ════", color='red' if staker.worker_address == NULL_ADDRESS else None)
|
||||
if stakeholder and stakeholder.worker_data:
|
||||
worker_data = stakeholder.worker_data.get(staker.checksum_address)
|
||||
if worker_data:
|
||||
emitter.echo(f"\t public address: {worker_data['publicaddress']}")
|
||||
if worker_data.get('nucypher version'):
|
||||
emitter.echo(f"\t NuCypher Version: {worker_data['nucypher version']}")
|
||||
if worker_data.get('blockchain_provider'):
|
||||
emitter.echo(f"\t Blockchain Provider: {worker_data['blockchain_provider']}")
|
||||
emitter.echo(tabulate.tabulate(zip(STAKER_TABLE_COLUMNS, staker_data), floatfmt="fancy_grid"))
|
||||
|
||||
rows = list()
|
||||
|
|
|
@ -266,7 +266,9 @@ class StakeHolderConfiguration(CharacterConfiguration):
|
|||
domain=self.domain,
|
||||
# TODO: Move empty collection casting to base
|
||||
checksum_addresses=self.checksum_addresses or list(),
|
||||
signer_uri=self.signer_uri)
|
||||
signer_uri=self.signer_uri,
|
||||
worker_data=self.worker_data
|
||||
)
|
||||
|
||||
if self.registry_filepath:
|
||||
payload.update(dict(registry_filepath=self.registry_filepath))
|
||||
|
|
|
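The worker_data wired through these configuration changes is the per-staker host map that
`nucypher cloudworkers` writes back into the stakeholder configuration file (see
update_stakeholder_config in nucypher.utilities.clouddeploy). A hedged sketch of its shape follows,
with hypothetical values; the exact keys depend on what the deployment captured:

    worker_data = {
        '0xStakerAddress': {                             # keyed by staker checksum address
            'publicaddress': '203.0.113.10',             # host used for ssh and the Ursula rest endpoint
            'provider': 'digitalocean',                  # 'aws', 'digitalocean', or 'generic'
            'blockchain_provider': 'wss://...',          # remote provider, or a geth.ipc path for decentralized nodes
            'nucypher_image': 'nucypher/nucypher:latest',
            'nucypher version': '...',                   # captured from ansible output when available
            'worker address': '0xWorkerAddress',
        },
    }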
@ -117,7 +117,10 @@ class CharacterConfiguration(BaseConfiguration):
|
|||
|
||||
# Registry
|
||||
registry: BaseContractRegistry = None,
|
||||
registry_filepath: str = None):
|
||||
registry_filepath: str = None,
|
||||
|
||||
# Deployed Workers
|
||||
worker_data: dict = None):
|
||||
|
||||
self.log = Logger(self.__class__.__name__)
|
||||
UNINITIALIZED_CONFIGURATION.bool_value(False)
|
||||
|
@ -165,6 +168,9 @@ class CharacterConfiguration(BaseConfiguration):
|
|||
self.config_file_location = filepath or UNINITIALIZED_CONFIGURATION
|
||||
self.config_root = UNINITIALIZED_CONFIGURATION
|
||||
|
||||
# Deployed Workers
|
||||
self.worker_data = worker_data
|
||||
|
||||
#
|
||||
# Federated vs. Blockchain arguments consistency
|
||||
#
|
||||
|
|
|
@ -0,0 +1,815 @@
|
|||
"""
|
||||
This file is part of nucypher.
|
||||
|
||||
nucypher is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
nucypher is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import maya
|
||||
import time
|
||||
from base64 import b64encode
|
||||
from jinja2 import Template
|
||||
import requests
|
||||
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.inventory.manager import InventoryManager
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.vars.manager import VariableManager
|
||||
from ansible.playbook.play import Play
|
||||
from ansible.executor.playbook_executor import PlaybookExecutor
|
||||
from ansible import context as ansible_context
|
||||
from ansible.module_utils.common.collections import ImmutableDict
|
||||
|
||||
from nucypher.config.constants import DEFAULT_CONFIG_ROOT
|
||||
from nucypher.blockchain.eth.clients import PUBLIC_CHAINS
|
||||
from nucypher.blockchain.eth.networks import NetworksInventory
|
||||
|
||||
NODE_CONFIG_STORAGE_KEY = 'worker-configs'
|
||||
URSULA_PORT = 9151
|
||||
PROMETHEUS_PORTS = [9101]
|
||||
|
||||
|
||||
ansible_context.CLIARGS = ImmutableDict(
|
||||
{
|
||||
'syntax': False,
|
||||
'start_at_task': None,
|
||||
'verbosity': 0,
|
||||
'become_method': 'sudo'
|
||||
}
|
||||
)
|
||||
|
||||
class AnsiblePlayBookResultsCollector(CallbackBase):
|
||||
"""
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, sock, *args, return_results=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.playbook_results = []
|
||||
self.sock = sock
|
||||
self.results = return_results
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
name = play.get_name().strip()
|
||||
if not name:
|
||||
msg = '\nPLAY {}\n'.format('*' * 100)
|
||||
else:
|
||||
msg = '\nPLAY [{}] {}\n'.format(name, '*' * 100)
|
||||
self.send_save(msg)
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
msg = '\nTASK [{}] {}\n'.format(task.get_name(), '*' * 100)
|
||||
self.send_save(msg)
|
||||
|
||||
def v2_runner_on_ok(self, result, *args, **kwargs):
|
||||
if result.is_changed():
|
||||
data = '[{}]=> changed'.format(result._host.name)
|
||||
else:
|
||||
data = '[{}]=> ok'.format(result._host.name)
|
||||
self.send_save(data, color='yellow' if result.is_changed() else 'green')
|
||||
if 'msg' in result._task_fields['args']:
|
||||
msg = result._task_fields['args']['msg']
|
||||
self.send_save(msg, color='yellow')
|
||||
self.send_save('\n')
|
||||
if self.results:
|
||||
for k in self.results.keys():
|
||||
regex = fr'{k}:\s*(?P<data>.*)'
|
||||
match = re.search(regex, msg, flags=re.MULTILINE)
|
||||
if match:
|
||||
self.results[k].append((result._host.name, match.groupdict()['data']))
|
||||
|
||||
|
||||
def v2_runner_on_failed(self, result, *args, **kwargs):
|
||||
if 'changed' in result._result:
|
||||
del result._result['changed']
|
||||
data = 'fail: [{}]=> {}: {}'.format(result._host.name, 'failed',
|
||||
self._dump_results(result._result))
|
||||
self.send_save(data, color='red')
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
if 'changed' in result._result:
|
||||
del result._result['changed']
|
||||
data = '[{}]=> {}: {}'.format(result._host.name, 'unreachable',
|
||||
self._dump_results(result._result))
|
||||
self.send_save(data)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
if 'changed' in result._result:
|
||||
del result._result['changed']
|
||||
data = '[{}]=> {}: {}'.format(
|
||||
result._host.name,
|
||||
'skipped',
|
||||
self._dump_results(result._result)
|
||||
)
|
||||
self.send_save(data, color='blue')
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
hosts = sorted(stats.processed.keys())
|
||||
data = '\nPLAY RECAP {}\n'.format('*' * 100)
|
||||
self.send_save(data)
|
||||
for h in hosts:
|
||||
s = stats.summarize(h)
|
||||
msg = '{} : ok={} changed={} unreachable={} failed={} skipped={}'.format(
|
||||
h, s['ok'], s['changed'], s['unreachable'], s['failures'], s['skipped'])
|
||||
self.send_save(msg)
|
||||
|
||||
def send_save(self, data, color=None):
|
||||
self.sock.echo(data, color=color)
|
||||
self.playbook_results.append(data)
|
||||
|
||||
|
||||
class BaseCloudNodeConfigurator:
|
||||
|
||||
PROMETHEUS_PORT = PROMETHEUS_PORTS[0]
|
||||
|
||||
def __init__(self,
|
||||
emitter,
|
||||
stakeholder,
|
||||
stakeholder_config_path,
|
||||
blockchain_provider=None,
|
||||
nucypher_image=None,
|
||||
seed_network=False,
|
||||
sentry_dsn=None,
|
||||
profile=None,
|
||||
prometheus=False,
|
||||
):
|
||||
|
||||
self.emitter = emitter
|
||||
self.stakeholder = stakeholder
|
||||
self.config_filename = f'{self.stakeholder.network}.json'
|
||||
self.network = self.stakeholder.network
|
||||
self.created_new_nodes = False
|
||||
|
||||
# the keys in this dict are used as search patterns by the ansible result collector, and the matching
# values are captured for each node whenever they appear in playbook output
|
||||
self.output_capture = {'worker address': [], 'rest url': [], 'nucypher version': [], 'nickname': []}
|
||||
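# Illustration (hedged, values hypothetical): how the result collector turns a matching
# line of playbook output into an entry in this dict, mirroring the regex used in
# AnsiblePlayBookResultsCollector.v2_runner_on_ok:
#
#     import re
#     output_capture = {'worker address': [], 'rest url': [], 'nucypher version': [], 'nickname': []}
#     host, msg = 'worker-1', 'worker address: 0xWorkerAddress'
#     for key, captured in output_capture.items():
#         match = re.search(fr'{key}:\s*(?P<data>.*)', msg, flags=re.MULTILINE)
#         if match:
#             captured.append((host, match.groupdict()['data']))
#     # output_capture['worker address'] is now [('worker-1', '0xWorkerAddress')]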
|
||||
# where we save our state data so we can remember the resources we created for future use
|
||||
self.config_path = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, self.config_filename)
|
||||
|
||||
self.emitter.echo(f"cloudworker config path: {self.config_path}")
|
||||
|
||||
if os.path.exists(self.config_path):
|
||||
self.config = json.load(open(self.config_path))
|
||||
self.namespace = self.config['namespace']
|
||||
else:
|
||||
self.namespace = f'{self.stakeholder.network}-{maya.now().date.isoformat()}'
|
||||
self.config = {
|
||||
"namespace": self.namespace,
|
||||
"keyringpassword": b64encode(os.urandom(64)).decode('utf-8'),
|
||||
"ethpassword": b64encode(os.urandom(64)).decode('utf-8'),
|
||||
}
|
||||
configdir = os.path.dirname(self.config_path)
|
||||
os.makedirs(configdir, exist_ok=True)
|
||||
|
||||
# configure provider specific attributes
|
||||
self._configure_provider_params(profile)
|
||||
|
||||
# if certain config options have been specified with this invocation,
|
||||
# save these to update host specific variables before deployment
|
||||
# to allow for individual host config differentiation
|
||||
self.host_level_overrides = {
|
||||
'blockchain_provider': blockchain_provider,
|
||||
'nucypher_image': nucypher_image,
|
||||
'sentry_dsn': sentry_dsn
|
||||
}
|
||||
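# These per-invocation overrides are applied to each targeted host later, in
# deploy_nucypher_on_existing_nodes(), with the following precedence (an illustration
# of the logic there, not additional behavior):
#   1. a value passed on the command line for this run always wins,
#   2. otherwise a value already stored for the instance is kept,
#   3. otherwise the global default from self.config is used.
# e.g. `nucypher cloudworkers deploy --nucypher-image nucypher/nucypher:latest` rewrites
# 'nucypher_image' for every targeted host, while omitting the flag leaves each host's
# previously stored image untouched.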
|
||||
self.config['blockchain_provider'] = blockchain_provider or self.config.get('blockchain_provider') or f'/root/.local/share/geth/.ethereum/{self.chain_name}/geth.ipc' # the default for nodes that run their own geth container
|
||||
self.config['nucypher_image'] = nucypher_image or self.config.get('nucypher_image') or 'nucypher/nucypher:latest'
|
||||
self.config['sentry_dsn'] = sentry_dsn or self.config.get('sentry_dsn')
|
||||
self.config['seed_network'] = seed_network or self.config.get('seed_network')
|
||||
if not self.config['seed_network']:
|
||||
self.config.pop('seed_node', None)
|
||||
self.nodes_are_decentralized = 'geth.ipc' in self.config['blockchain_provider']
|
||||
self.config['stakeholder_config_file'] = stakeholder_config_path
|
||||
self.config['use-prometheus'] = prometheus
|
||||
|
||||
self._write_config()
|
||||
|
||||
def _write_config(self):
|
||||
with open(self.config_path, 'w') as outfile:
|
||||
json.dump(self.config, outfile, indent=4)
|
||||
|
||||
@property
|
||||
def _provider_deploy_attrs(self):
|
||||
return []
|
||||
|
||||
def _configure_provider_params(self, provider_profile):
|
||||
pass
|
||||
|
||||
def _do_setup_for_instance_creation(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
def chain_id(self):
|
||||
return NetworksInventory.get_ethereum_chain_id(self.network)
|
||||
|
||||
@property
|
||||
def chain_name(self):
|
||||
try:
|
||||
return PUBLIC_CHAINS[self.chain_id].lower()
|
||||
except KeyError:
|
||||
self.emitter.echo(f"could not identify public blockchain for {self.network}", color="red")
|
||||
|
||||
@property
|
||||
def inventory_path(self):
|
||||
return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace}.ansible_inventory.yml')
|
||||
|
||||
def generate_ansible_inventory(self, staker_addresses, wipe_nucypher=False):
|
||||
|
||||
status_template = Template(self._inventory_template)
|
||||
|
||||
inventory_content = status_template.render(
|
||||
deployer=self,
|
||||
nodes=[value for key, value in self.config['instances'].items() if key in staker_addresses],
|
||||
wipe_nucypher=wipe_nucypher
|
||||
)
|
||||
|
||||
|
||||
with open(self.inventory_path, 'w') as outfile:
|
||||
outfile.write(inventory_content)
|
||||
self._write_config()
|
||||
|
||||
return self.inventory_path
|
||||
|
||||
def create_nodes_for_stakers(self, stakers):
|
||||
count = len(stakers)
|
||||
self.emitter.echo(f"ensuring cloud nodes exist for the following {count} stakers:")
|
||||
for s in stakers:
|
||||
self.emitter.echo(f'\t{s}')
|
||||
time.sleep(3)
|
||||
self._do_setup_for_instance_creation()
|
||||
|
||||
if not self.config.get('instances'):
|
||||
self.config['instances'] = {}
|
||||
|
||||
for address in stakers:
|
||||
existing_node = self.config['instances'].get(address)
|
||||
if not existing_node:
|
||||
self.emitter.echo(f'creating new node for {address}', color='yellow')
|
||||
time.sleep(3)
|
||||
node_data = self.create_new_node_for_staker(address)
|
||||
node_data['provider'] = self.provider_name
|
||||
self.config['instances'][address] = node_data
|
||||
if self.config['seed_network'] and not self.config.get('seed_node'):
|
||||
self.config['seed_node'] = node_data['publicaddress']
|
||||
self._write_config()
|
||||
self.created_new_nodes = True
|
||||
|
||||
return self.config
|
||||
|
||||
@property
|
||||
def _inventory_template(self):
|
||||
return open(os.path.join(os.path.dirname(__file__), 'templates', 'cloud_deploy_ansible_inventory.j2'), 'r').read()
|
||||
|
||||
def deploy_nucypher_on_existing_nodes(self, staker_addresses, wipe_nucypher=False):
|
||||
|
||||
# first update any specified input in our node config
|
||||
for k, input_specified_value in self.host_level_overrides.items():
|
||||
for address in staker_addresses:
|
||||
if self.config['instances'].get(address):
|
||||
# if an instance already has a specified value, we only override
|
||||
# it if that value was input for this command invocation
|
||||
if input_specified_value:
|
||||
self.config['instances'][address][k] = input_specified_value
|
||||
elif not self.config['instances'][address].get(k):
|
||||
self.config['instances'][address][k] = self.config[k]
|
||||
|
||||
self._write_config()
|
||||
|
||||
if self.created_new_nodes:
|
||||
self.emitter.echo("--- Giving newly created nodes some time to get ready ----")
|
||||
time.sleep(30)
|
||||
self.emitter.echo('Running ansible deployment for all running nodes.', color='green')
|
||||
|
||||
self.emitter.echo(f"using inventory file at {self.inventory_path}", color='yellow')
|
||||
if self.config.get('keypair_path'):
|
||||
self.emitter.echo(f"using keypair file at {self.config['keypair_path']}", color='yellow')
|
||||
|
||||
self.generate_ansible_inventory(staker_addresses, wipe_nucypher=wipe_nucypher)
|
||||
|
||||
results = self.output_capture
|
||||
loader = DataLoader()
|
||||
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
|
||||
callback = AnsiblePlayBookResultsCollector(sock=self.emitter, return_results=self.output_capture)
|
||||
variable_manager = VariableManager(loader=loader, inventory=inventory)
|
||||
|
||||
executor = PlaybookExecutor(
|
||||
playbooks = ['deploy/ansible/worker/setup_remote_workers.yml'],
|
||||
inventory=inventory,
|
||||
variable_manager=variable_manager,
|
||||
loader=loader,
|
||||
passwords=dict(),
|
||||
)
|
||||
executor._tqm._stdout_callback = callback
|
||||
executor.run()
|
||||
|
||||
self.update_captured_instance_data(self.output_capture)
|
||||
self.give_helpful_hints()
|
||||
|
||||
|
||||
def get_worker_status(self, staker_addresses):
|
||||
|
||||
self.emitter.echo('Running ansible status playbook.', color='green')
|
||||
self.emitter.echo('If something goes wrong, it is generally safe to ctrl-c and run the previous command again.')
|
||||
|
||||
self.emitter.echo(f"using inventory file at {self.inventory_path}", color='yellow')
|
||||
if self.config.get('keypair_path'):
|
||||
self.emitter.echo(f"using keypair file at {self.config['keypair_path']}", color='yellow')
|
||||
|
||||
|
||||
self.generate_ansible_inventory(staker_addresses)
|
||||
|
||||
loader = DataLoader()
|
||||
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
|
||||
callback = AnsiblePlayBookResultsCollector(sock=self.emitter, return_results=self.output_capture)
|
||||
variable_manager = VariableManager(loader=loader, inventory=inventory)
|
||||
|
||||
executor = PlaybookExecutor(
|
||||
playbooks = ['deploy/ansible/worker/get_workers_status.yml'],
|
||||
inventory=inventory,
|
||||
variable_manager=variable_manager,
|
||||
loader=loader,
|
||||
passwords=dict(),
|
||||
)
|
||||
executor._tqm._stdout_callback = callback
|
||||
executor.run()
|
||||
self.update_captured_instance_data(self.output_capture)
|
||||
|
||||
self.give_helpful_hints()
|
||||
|
||||
def get_provider_hosts(self):
|
||||
return [
|
||||
(address, host_data) for address, host_data in self.config['instances'].items()
|
||||
if host_data['provider'] == self.provider_name
|
||||
]
|
||||
|
||||
def destroy_resources(self, staker_addresses=None):
|
||||
# get_provider_hosts() yields (address, host_data) pairs, so compare against the addresses only
addresses = [s for s in staker_addresses if s in [address for address, host_data in self.get_provider_hosts()]]
|
||||
if self._destroy_resources(addresses):
|
||||
self.emitter.echo(f"deleted all requested resources for {self.provider_name}. We are clean. No money is being spent.", color="green")
|
||||
|
||||
def _destroy_resources(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def update_captured_instance_data(self, results):
|
||||
instances_by_public_address = {d['publicaddress']: d for d in self.config['instances'].values()}
|
||||
for k, data in results.items():
|
||||
# results are keyed by 'publicaddress' in config data
|
||||
for instance_address, value in data:
|
||||
instances_by_public_address[instance_address][k] = value
|
||||
|
||||
for k, v in self.config['instances'].items():
|
||||
if instances_by_public_address.get(v['publicaddress']):
|
||||
self.config['instances'][k] = instances_by_public_address.get(v['publicaddress'])
|
||||
|
||||
self._write_config()
|
||||
self.update_stakeholder_config()
|
||||
|
||||
def update_stakeholder_config(self):
|
||||
data = {}
|
||||
data = json.loads(open(self.config['stakeholder_config_file'], 'r').read())
|
||||
existing_worker_data = data.get('worker_data', {})
|
||||
existing_worker_data.update(self.config['instances'])
|
||||
data['worker_data'] = existing_worker_data
|
||||
with open(self.config['stakeholder_config_file'], 'w') as outfile:
|
||||
json.dump(data, outfile, indent=4)
|
||||
|
||||
def give_helpful_hints(self):
|
||||
|
||||
if self.config.get('keypair_path'):
|
||||
keypair = self.config['keypair_path']
|
||||
self.emitter.echo(f'ssh into any node using `ssh ubuntu@<node address> -i "{keypair}"`', color="yellow")
|
||||
|
||||
class DigitalOceanConfigurator(BaseCloudNodeConfigurator):
|
||||
|
||||
default_region = 'SFO3'
|
||||
provider_name = 'digitalocean'
|
||||
|
||||
@property
|
||||
def instance_size(self):
|
||||
if self.nodes_are_decentralized:
|
||||
return 's-2vcpu-4gb'
|
||||
return "s-1vcpu-2gb"
|
||||
|
||||
@property
|
||||
def _provider_deploy_attrs(self):
|
||||
return [
|
||||
{'key': 'default_user', 'value': 'root'},
|
||||
]
|
||||
|
||||
def _configure_provider_params(self, provider_profile):
|
||||
self.token = os.getenv('DIGITALOCEAN_ACCESS_TOKEN')
|
||||
if not self.token:
|
||||
self.emitter.echo(f"Please `export DIGITALOCEAN_ACCESS_TOKEN=<your access token>` from here: https://cloud.digitalocean.com/account/api/tokens", color="red")
|
||||
raise AttributeError("Could not continue without DIGITALOCEAN_ACCESS_TOKEN environment variable.")
|
||||
self.region = os.getenv('DIGITALOCEAN_REGION') or self.config.get('region')
|
||||
if not self.region:
|
||||
self.region = self.default_region
|
||||
self.config['region'] = self.region
|
||||
self.emitter.echo(f'using DigitalOcean region: {self.region}; to change regions, `export DIGITALOCEAN_REGION=<region>` (see https://www.digitalocean.com/docs/platform/availability-matrix/)', color='yellow')
|
||||
|
||||
self.sshkey = os.getenv('DIGITAL_OCEAN_KEY_FINGERPRINT') or self.config.get('sshkey')
|
||||
if not self.sshkey:
|
||||
self.emitter.echo("Please set the name of your Digital Ocean SSH Key (`export DIGITAL_OCEAN_KEY_FINGERPRINT=<your preferred ssh key fingerprint>` from here: https://cloud.digitalocean.com/account/security", color="red")
|
||||
self.emitter.echo("it should look like `DIGITAL_OCEAN_KEY_FINGERPRINT=88:fb:53:51:09:aa:af:02:e2:99:95:2d:39:64:c1:64`", color="red")
|
||||
raise AttributeError("Could not continue without DIGITAL_OCEAN_KEY_FINGERPRINT environment variable.")
|
||||
self.config['sshkey'] = self.sshkey
|
||||
|
||||
self._write_config()
|
||||
|
||||
def create_new_node_for_staker(self, address):
|
||||
|
||||
response = requests.post("https://api.digitalocean.com/v2/droplets",
|
||||
{
|
||||
"name": f'{self.namespace}-{address}',
|
||||
"region": self.region,
|
||||
"size": self.instance_size,
|
||||
"image":"ubuntu-20-04-x64",
|
||||
"ssh_keys": [self.sshkey]
|
||||
},
|
||||
headers = {
|
||||
"Authorization": f'Bearer {self.token}'
|
||||
}
|
||||
)
|
||||
|
||||
if response.status_code < 300:
|
||||
resp = response.json()
|
||||
|
||||
new_node_id = resp['droplet']['id']
|
||||
node_data = {'InstanceId': new_node_id}
|
||||
|
||||
self.emitter.echo("\twaiting for instance to come online...")
|
||||
|
||||
instance_ip = None
|
||||
while not instance_ip:
|
||||
time.sleep(1)
|
||||
|
||||
instance_resp = requests.get(
|
||||
f'https://api.digitalocean.com/v2/droplets/{new_node_id}/',
|
||||
headers = {
|
||||
"Authorization": f'Bearer {self.token}'
|
||||
}
|
||||
).json().get('droplet')
|
||||
if instance_resp['status'] == 'active':
|
||||
if instance_resp.get('networks', {}).get('v4'):
|
||||
instance_ip = instance_resp['networks']['v4'][0]['ip_address']
|
||||
node_data['publicaddress'] = instance_ip
|
||||
node_data['remote_provider'] = self.config.get('blockchain_provider')
|
||||
node_data['provider_deploy_attrs']= self._provider_deploy_attrs
|
||||
return node_data
|
||||
|
||||
else:
|
||||
self.emitter.echo(response.text, color='red')
|
||||
raise BaseException("Error creating resources in DigitalOcean")
|
||||
|
||||
def _destroy_resources(self, stakes):
|
||||
|
||||
existing_instances = copy.copy(self.config.get('instances'))
|
||||
if existing_instances:
|
||||
for address, instance in existing_instances.items():
|
||||
if stakes and not address in stakes:
|
||||
continue
|
||||
self.emitter.echo(f"deleting worker instance for {address} in 3 seconds...", color='red')
|
||||
time.sleep(3)
|
||||
if requests.delete(
|
||||
f'https://api.digitalocean.com/v2/droplets/{instance["InstanceId"]}/',
|
||||
headers = {
|
||||
"Authorization": f'Bearer {self.token}'
|
||||
}).status_code == 204:
|
||||
self.emitter.echo(f"\tdestroyed instance for {address}")
|
||||
del self.config['instances'][address]
|
||||
self._write_config()
|
||||
else:
|
||||
raise
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
class AWSNodeConfigurator(BaseCloudNodeConfigurator):
|
||||
|
||||
"""
|
||||
gets a node up and running.
|
||||
"""
|
||||
|
||||
provider_name = 'aws'
|
||||
EC2_INSTANCE_SIZE = 't3.small'
|
||||
|
||||
# TODO: this probably needs to be region specific...
|
||||
EC2_AMI = 'ami-09dd2e08d601bff67'
|
||||
preferred_platform = 'ubuntu-focal' #unused
|
||||
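# One way to address the region-specific AMI TODO above, sketched with boto3; the
# Canonical owner id and image-name filter below are assumptions based on Canonical's
# published Ubuntu 20.04 images, not something this module currently does:
#
#     def _latest_focal_ami(self):
#         # query Canonical's Ubuntu 20.04 (focal) amd64 images in the session's region
#         images = self.ec2Client.describe_images(
#             Owners=['099720109477'],  # Canonical
#             Filters=[{'Name': 'name',
#                       'Values': ['ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*']}],
#         )['Images']
#         # newest image wins
#         return max(images, key=lambda image: image['CreationDate'])['ImageId']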
|
||||
@property
|
||||
def _provider_deploy_attrs(self):
|
||||
return [
|
||||
{'key': 'ansible_ssh_private_key_file', 'value': self.config['keypair_path']},
|
||||
{'key': 'default_user', 'value': 'ubuntu'}
|
||||
]
|
||||
|
||||
def _configure_provider_params(self, provider_profile):
|
||||
|
||||
# some attributes we will configure later
|
||||
self.vpc = None
|
||||
|
||||
# if boto3 is not available, inform the user they'll need it.
|
||||
try:
|
||||
import boto3
|
||||
except ImportError:
|
||||
self.emitter.echo("You need to have boto3 installed to use this feature (pip3 install boto3)", color='red')
|
||||
raise AttributeError("boto3 not found.")
|
||||
# figure out which AWS account to use.
|
||||
|
||||
# find aws profiles on user's local environment
|
||||
profiles = boto3.session.Session().available_profiles
|
||||
|
||||
self.profile = provider_profile or self.config.get('profile')
|
||||
if not self.profile:
|
||||
self.emitter.echo("Aws nodes can only be created with an aws profile. (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)", color='red')
|
||||
raise AttributeError("AWS profile not configured.")
|
||||
self.emitter.echo(f'using profile: {self.profile}')
|
||||
if self.profile in profiles:
|
||||
self.session = boto3.Session(profile_name=self.profile)
|
||||
self.ec2Client = self.session.client('ec2')
|
||||
self.ec2Resource = self.session.resource('ec2')
|
||||
else:
|
||||
if profiles:
|
||||
self.emitter.echo(f"please select a profile (--aws-profile) from your aws profiles: {profiles}", color='red')
|
||||
else:
|
||||
self.emitter.echo(f"no aws profiles could be found. Ensure aws is installed and configured: https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html", color='red')
|
||||
if self.profile:
|
||||
self.config['profile'] = self.profile
|
||||
|
||||
self.keypair = self.config.get('keypair')
|
||||
if not self.keypair:
|
||||
self.keypair, keypair_path = self._create_keypair()
|
||||
self.config['keypair_path'] = keypair_path
|
||||
|
||||
self.config['keypair'] = self.keypair
|
||||
|
||||
@property
|
||||
def aws_tags(self):
|
||||
# to keep track of the junk we put in the cloud
|
||||
return [{"Key": "Name", "Value": self.namespace}]
|
||||
|
||||
def _create_keypair(self):
|
||||
new_keypair_data = self.ec2Client.create_key_pair(KeyName=f'{self.namespace}')
|
||||
outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace}.awskeypair')
|
||||
os.makedirs(os.path.dirname(outpath), exist_ok=True)
|
||||
with open(outpath, 'w') as outfile:
|
||||
outfile.write(new_keypair_data['KeyMaterial'])
|
||||
# set local keypair permissions https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
|
||||
os.chmod(outpath, 0o400)
|
||||
self.emitter.echo(f"a new aws keypair was saved to {outpath}, keep it safe.", color='yellow')
|
||||
return new_keypair_data['KeyName'], outpath
|
||||
|
||||
    def _delete_keypair(self):
        # only use self.namespace here to avoid accidental deletions of pre-existing keypairs
        deleted_keypair_data = self.ec2Client.delete_key_pair(KeyName=f'{self.namespace}')
        # boto3 reports the HTTP status under ResponseMetadata
        if deleted_keypair_data['ResponseMetadata']['HTTPStatusCode'] == 200:
            outpath = os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, f'{self.namespace}.awskeypair')
            os.remove(outpath)
            self.emitter.echo(f"keypair at {outpath} was deleted", color='yellow')

    def _ensure_vpc(self):
        """creates an aws virtual private cloud if one doesn't exist"""

        try:
            from botocore import exceptions as botoexceptions
        except ImportError:
            self.emitter.echo("You need to have boto3 installed to use this feature (pip3 install boto3)")
            return

        if not self.vpc:
            vpc_id = self.config.get('Vpc')
            if vpc_id:
                self.vpc = self.ec2Resource.Vpc(vpc_id)
            else:
                try:
                    # 172.16.0.0/16 is a private (RFC 1918) range; the subnet created later is carved out of it
                    vpcdata = self.ec2Client.create_vpc(CidrBlock='172.16.0.0/16')
                except botoexceptions.NoCredentialsError:
                    raise ValueError(f'Could not create AWS resource with profile "{self.profile}" and keypair "{self.keypair}"; please run this command with --aws-profile and --aws-keypair to specify matching aws credentials')
                self.vpc = self.ec2Resource.Vpc(vpcdata['Vpc']['VpcId'])
                self.vpc.wait_until_available()
                self.vpc.create_tags(Tags=self.aws_tags)
                self.vpc.modify_attribute(EnableDnsSupport={'Value': True})
                self.vpc.modify_attribute(EnableDnsHostnames={'Value': True})
                self.config['Vpc'] = vpc_id = self.vpc.id
                self._write_config()
        return self.vpc

    def _configure_path_to_internet(self):
        """
        create and configure all the little AWS bits we need to get an internet request
        from the internet to our node and back
        """

        if not self.config.get('InternetGateway'):
            gatewaydata = self.ec2Client.create_internet_gateway()
            self.config['InternetGateway'] = gateway_id = gatewaydata['InternetGateway']['InternetGatewayId']
            # tag it
            self._write_config()
            self.ec2Resource.InternetGateway(
                self.config['InternetGateway']).create_tags(Tags=self.aws_tags)

            self.vpc.attach_internet_gateway(InternetGatewayId=self.config['InternetGateway'])

        routetable_id = self.config.get('RouteTable')
        if not routetable_id:
            routetable = self.vpc.create_route_table()
            self.config['RouteTable'] = routetable_id = routetable.id
            self._write_config()
            routetable.create_tags(Tags=self.aws_tags)

        routetable = self.ec2Resource.RouteTable(routetable_id)
        routetable.create_route(DestinationCidrBlock='0.0.0.0/0', GatewayId=self.config['InternetGateway'])

        if not self.config.get('Subnet'):
            subnetdata = self.ec2Client.create_subnet(CidrBlock='172.16.1.0/24', VpcId=self.vpc.id)
            self.config['Subnet'] = subnet_id = subnetdata['Subnet']['SubnetId']
            self._write_config()
            self.ec2Resource.Subnet(subnet_id).create_tags(Tags=self.aws_tags)

        routetable.associate_with_subnet(SubnetId=self.config['Subnet'])

        if not self.config.get('SecurityGroup'):
            securitygroupdata = self.ec2Client.create_security_group(GroupName=f'Ursula-{self.namespace}', Description='ssh and Nucypher ports', VpcId=self.config['Vpc'])
            self.config['SecurityGroup'] = sg_id = securitygroupdata['GroupId']
            self._write_config()
            securitygroup = self.ec2Resource.SecurityGroup(sg_id)
            securitygroup.create_tags(Tags=self.aws_tags)

            securitygroup.authorize_ingress(CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=22, ToPort=22)
            # TODO: is it always 9151? Does that matter? Should this be configurable?
            securitygroup.authorize_ingress(CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=URSULA_PORT, ToPort=URSULA_PORT)
            for port in PROMETHEUS_PORTS:
                securitygroup.authorize_ingress(CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=port, ToPort=port)

    def _do_setup_for_instance_creation(self):
        if not getattr(self, 'profile', None):
            self.emitter.echo("AWS nodes can only be created with an AWS profile. (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)", color='red')
            raise AttributeError("AWS profile not configured.")

        self.emitter.echo("ensuring that prerequisite cloud resources exist for instance creation.")
        self._ensure_vpc()
        self._configure_path_to_internet()
        self.emitter.echo("all prerequisite cloud resources do exist.")

    def _destroy_resources(self, stakes):
        try:
            from botocore import exceptions as botoexceptions
        except ImportError:
            self.emitter.echo("You need to have boto3 installed to use this feature (pip3 install boto3)")
            return

        vpc = self.ec2Resource.Vpc(self.config['Vpc'])
        if self.config.get('instances'):
            # iterate over a copy so entries can be removed from the dict as instances are terminated
            for address, instance in list(self.config['instances'].items()):
                if stakes and address not in stakes:
                    continue
                self.emitter.echo(f"deleting worker instance for {address} in 3 seconds...", color='red')
                time.sleep(3)
                self.ec2Resource.Instance(instance['InstanceId']).terminate()
                del self.config['instances'][address]
                self._write_config()

        if not self.config.get('instances'):
            self.emitter.echo("waiting for instance termination...")
            time.sleep(10)
            for subresource in ['Subnet', 'RouteTable', 'SecurityGroup']:
                tries = 0
                while self.config.get(subresource) and tries < 10:
                    try:
                        getattr(self.ec2Resource, subresource)(self.config[subresource]).delete()
                        self.emitter.echo(f'deleted {subresource}: {self.config[subresource]}')
                        del self.config[subresource]
                        self._write_config()
                    except botoexceptions.ClientError as e:
                        tries += 1
                        self.emitter.echo(f'failed to delete {subresource}, because: {e}.. trying again in 10...', color="yellow")
                        time.sleep(10)
                if tries >= 10:
                    self.emitter.echo("some resources could not be deleted because AWS is taking a while to delete things. Run this command again in a minute or so...", color="yellow")
                    return False

            if self.config.get('InternetGateway'):
                self.ec2Resource.InternetGateway(self.config['InternetGateway']).detach_from_vpc(VpcId=self.config['Vpc'])
                self.ec2Resource.InternetGateway(self.config['InternetGateway']).delete()
                self.emitter.echo(f'deleted InternetGateway: {self.config["InternetGateway"]}')
                del self.config['InternetGateway']
                self._write_config()

            if self.config.get('Vpc'):
                vpc.delete()
                self.emitter.echo(f'deleted Vpc: {self.config["Vpc"]}')
                del self.config['Vpc']

            if self.config.get('keypair'):
                self.emitter.echo(f'deleting keypair {self.keypair} in 5 seconds...', color='red')
                time.sleep(6)
                self.ec2Client.delete_key_pair(KeyName=self.config.get('keypair'))
                del self.config['keypair']
                os.remove(self.config['keypair_path'])

        return True

    def create_new_node_for_staker(self, address):
        new_instance_data = self.ec2Client.run_instances(
            ImageId=self.EC2_AMI,
            InstanceType=self.EC2_INSTANCE_SIZE,
            MaxCount=1,
            MinCount=1,
            KeyName=self.keypair,
            NetworkInterfaces=[
                {
                    'AssociatePublicIpAddress': True,
                    'DeleteOnTermination': True,
                    'DeviceIndex': 0,
                    'Groups': [
                        self.config['SecurityGroup']
                    ],
                    'SubnetId': self.config['Subnet'],
                },
            ],
            TagSpecifications=[
                {
                    'ResourceType': 'instance',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': f'{self.namespace}-{address}'
                        },
                    ]
                },
            ],
        )

        node_data = {'InstanceId': new_instance_data['Instances'][0]['InstanceId']}

        instance = self.ec2Resource.Instance(new_instance_data['Instances'][0]['InstanceId'])
        self.emitter.echo("\twaiting for instance to come online...")
        instance.wait_until_running()
        instance.load()
        node_data['publicaddress'] = instance.public_dns_name
        node_data['provider_deploy_attrs'] = self._provider_deploy_attrs

        return node_data

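    # Illustrative only (not part of the original change): the node_data dict returned
    # above is what feeds the Ansible inventory template at the bottom of this diff.
    # The concrete values below are hypothetical.
    #
    #   {
    #       'InstanceId': 'i-0123456789abcdef0',
    #       'publicaddress': 'ec2-203-0-113-10.compute-1.amazonaws.com',
    #       'provider_deploy_attrs': [
    #           {'key': 'ansible_ssh_private_key_file', 'value': '<keypair_path>'},
    #           {'key': 'default_user', 'value': 'ubuntu'},
    #       ],
    #   }
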
class GenericConfigurator(BaseCloudNodeConfigurator):

    provider_name = 'generic'

    def create_nodes_for_stakers(self, stakers, host_address, login_name, key_path, ssh_port):

        if not self.config.get('instances'):
            self.config['instances'] = {}

        for address in stakers:
            node_data = self.config['instances'].get(address, {})
            if node_data:
                self.emitter.echo(f"Host info already exists for staker {address}; Updating and proceeding.", color="yellow")
                time.sleep(3)

            node_data['publicaddress'] = host_address
            node_data['provider'] = self.provider_name
            node_data['provider_deploy_attrs'] = [
                {'key': 'ansible_ssh_private_key_file', 'value': key_path},
                {'key': 'default_user', 'value': login_name},
                {'key': 'ansible_port', 'value': ssh_port}
            ]

            self.config['instances'][address] = node_data
            if self.config['seed_network'] and not self.config.get('seed_node'):
                self.config['seed_node'] = node_data['publicaddress']
            self._write_config()
            self.created_new_nodes = True

        return self.config


class CloudDeployers:

    aws = AWSNodeConfigurator
    digitalocean = DigitalOceanConfigurator
    generic = GenericConfigurator

    @staticmethod
    def get_deployer(name):
        return getattr(CloudDeployers, name)

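# A minimal usage sketch (not part of the original change): CloudDeployers maps a
# provider name given on the command line to its configurator class; constructor
# arguments are defined elsewhere in this module, so only the lookup is shown here.
#
#   deployer_class = CloudDeployers.get_deployer('aws')   # -> AWSNodeConfigurator
#   assert deployer_class.provider_name == 'aws'
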
@ -0,0 +1,32 @@
all:
  children:
    nucypher:
      children:
        {{deployer.network}}:
          children:
            nodes:
              vars:
                network_name: "{{deployer.network}}"
                geth_options: "--{{deployer.chain_name}}"
                geth_dir: '/home/nucypher/geth/.ethereum/{{deployer.chain_name}}/'
                geth_container_geth_datadir: "/root/.ethereum/{{deployer.chain_name}}"
                nucypher_container_geth_datadir: "/root/.local/share/geth/.ethereum/{{deployer.chain_name}}"
                etherscan_domain: {{deployer.chain_name}}.etherscan.io
                ansible_python_interpreter: /usr/bin/python3
                ansible_connection: ssh
                NUCYPHER_KEYRING_PASSWORD: {{deployer.config['keyringpassword']}}
                NUCYPHER_WORKER_ETH_PASSWORD: {{deployer.config['ethpassword']}}
                nucypher_image: {{deployer.config['nucypher_image']}}
                blockchain_provider: {{deployer.config['blockchain_provider']}}
                node_is_decentralized: {{deployer.nodes_are_decentralized}}
                prometheus: {% if deployer.config.get('use-prometheus') %}--prometheus --metrics-port {{deployer.PROMETHEUS_PORT}}{% endif %}
                SEED_NODE_URI:{% if deployer.config.get('seed_node') %} {{deployer.config['seed_node']}}{% endif %}
                {% if deployer.config.get('sentry_dsn')%}SENTRY_DSN: {{deployer.config['sentry_dsn']}}{% endif %}
                wipe_nucypher_config: {{wipe_nucypher}}
              hosts:{% for node in nodes %}
                {{node.publicaddress}}:{% for attr in node.provider_deploy_attrs %}
                  {{attr.key}}: {{attr.value}}{% endfor %}{% if node.blockchain_provider %}
                  blockchain_provider: {{node.blockchain_provider}}{% endif %}{% if node.nucypher_image %}
                  nucypher_image: {{node.nucypher_image}}{% endif %}{% if node.sentry_dsn %}
                  sentry_dsn: {{node.sentry_dsn}}{% endif %}
                {% endfor %}