mirror of https://github.com/nucypher/nucypher.git
Purge stale automation scripts
parent 92d74ade11
commit b29d6e2b26
@ -1,11 +0,0 @@
[defaults]
host_key_auto_add = True
host_key_checking = False
inventory = ./inventory/
ansible_python_interpreter=/usr/bin/python3
retry_files_enabled = False
retry_files_save_path = "./retries/"

# Ubuntu
control_path = %(directory)s/%%h-%%r
@ -1,78 +0,0 @@
# Deploying Nucypher (worker/staker) to Azure Cloud

If you have Ansible set up to run playbooks against the Azure resource API, you can run `deploy_nucypher_azure_infra.yml` directly.

### Setting up an environment for running Ansible Azure

You have 3 options for using Ansible to deploy your infrastructure:

1. Utilize the "cloud shell" within the Azure portal, which comes pre-installed with Ansible and your credentials.
2. Use your own copy of Ansible and install the Azure module (through pip).
3. Set up your own deployment machine on Ubuntu to run playbooks and deploy stakers/workers.

Option 1 is ready to go: use the playbook `deploy_nucypher_azure_infra.yml`, followed by the playbooks in the /worker/ folder.

For option 2 you will need Ansible (Azure module) installed on your local host (documentation [here](https://docs.ansible.com/ansible/latest/scenario_guides/guide_azure.html)).

For option 3, I've included the steps below to set up a vanilla Ubuntu node to run Ansible (w/ Azure module), geth, and everything you need to deploy the Ansible playbooks for your Nucypher stakers/workers.

(Instructions valid w/ Canonical Ubuntu 16.04/18.04)

#### Install virtualenv and activate
```console
azureuser@ncdeploy:~$ sudo apt-get update
azureuser@ncdeploy:~$ sudo apt-get install -y virtualenv
azureuser@ncdeploy:~$ virtualenv nucypher_ansible
azureuser@ncdeploy:~$ source nucypher_ansible/bin/activate
```
#### Install Ansible (w/ Azure module) inside a virtual environment
```console
azureuser@ncdeploy:~$ pip install 'ansible[azure]'
```
#### Export environment variables (Azure credentials)
```console
azureuser@ncdeploy:~$ export AZURE_CLIENT_ID=''
azureuser@ncdeploy:~$ export AZURE_SECRET=''
azureuser@ncdeploy:~$ export AZURE_SUBSCRIPTION_ID=''
azureuser@ncdeploy:~$ export AZURE_TENANT=''
```
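If you still need to generate these credentials, one common route (not part of the original README; assumes the Azure CLI is installed and you have logged in with `az login`) is to create a service principal and read back your subscription and tenant IDs:

```console
azureuser@ncdeploy:~$ az ad sp create-for-rbac --name nucypher-ansible
azureuser@ncdeploy:~$ az account show --query "{subscription_id: id, tenant: tenantId}"
```

In the service-principal output, `appId` maps to `AZURE_CLIENT_ID` and `password` to `AZURE_SECRET`; the `nucypher-ansible` name is just an illustrative label.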
#### Create 2GB swap file (for local geth instance)
```console
azureuser@ncdeploy:~$ sudo fallocate -l 2G /swapfile
azureuser@ncdeploy:~$ sudo chmod 600 /swapfile
azureuser@ncdeploy:~$ sudo mkswap /swapfile
azureuser@ncdeploy:~$ sudo swapon /swapfile
azureuser@ncdeploy:~$ sudo cp /etc/fstab /etc/fstab.bak
azureuser@ncdeploy:~$ echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
```
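As a quick sanity check (not one of the original steps), you can confirm the swap space is active before moving on:

```console
azureuser@ncdeploy:~$ swapon --show
azureuser@ncdeploy:~$ free -h
```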
#### Install geth
```console
azureuser@ncdeploy:~$ sudo add-apt-repository -y ppa:ethereum/ethereum
azureuser@ncdeploy:~$ sudo apt-get update
azureuser@ncdeploy:~$ sudo apt-get install -y ethereum
```
#### Run geth (goerli testnet)
```console
azureuser@ncdeploy:~$ nohup geth --goerli --syncmode fast --cache 1024 &
```
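Because geth is launched under `nohup` and backgrounded, its output goes to `nohup.out` in the current directory; tailing that file is a simple way to watch startup and sync progress (an optional aside, not an original step):

```console
azureuser@ncdeploy:~$ tail -f nohup.out
```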
#### Check geth is finished syncing
```console
azureuser@ncdeploy:~$ geth attach ~/.ethereum/goerli/geth.ipc
(within geth): eth.syncing
```
Wait for the result of the command above to come back as `false`.
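While the chain is still downloading, `eth.syncing` returns a progress object; once fully synced it returns `false`. Illustrative output (the block numbers here are made up):

```console
> eth.syncing
{
  currentBlock: 1200000,
  highestBlock: 1500000
}
> eth.syncing
false
```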

#### Run ansible playbook to deploy Nucypher Staker and Worker(s)

<ins>Inventory values:</ins>
* Azure Location: West Central US (typically one of the lowest-cost locations)
* Linux Distribution: Ubuntu 18.04 LTS
* VM Size: B1s (1 vCPU, 1GB RAM, 4GB ephemeral disk)
* Make sure to update the inventory file with your public key for login.

```console
azureuser@ncdeploy:~$ ansible-playbook deploy_nucypher_azure_infra.yml -i inventory.yml
```
@ -1,85 +0,0 @@
- name: Nucypher (staker/worker) VM for Azure
  hosts: localhost
  connection: local
  tasks:

    - name: Resource Group for Nucypher nodes (deployment location is determined below)
      azure_rm_resourcegroup:
        name: "{{ resgroup }}"
        location: "{{ cloudregion }}"

    - name: Virtual Network
      azure_rm_virtualnetwork:
        resource_group: "{{ resgroup }}"
        name: "{{ vnet }}"
        address_prefixes: "10.0.0.0/16"

    - name: Subnet
      azure_rm_subnet:
        resource_group: "{{ resgroup }}"
        name: "{{ subnet }}"
        address_prefix: "10.0.1.0/24"
        virtual_network: "{{ vnet }}"

    - name: Create public IP address
      azure_rm_publicipaddress:
        resource_group: "{{ resgroup }}"
        allocation_method: Static
        name: "{{ item.ip }}"
      register: output_ip_address
      loop: "{{ vmlist }}"

    - name: Allow SSH and Nucypher communication ports in network security group
      azure_rm_securitygroup:
        resource_group: "{{ resgroup }}"
        name: "{{ item.nsg }}"
        rules:
          - name: SSH
            protocol: Tcp
            destination_port_range: 22
            access: Allow
            priority: 1001
            direction: Inbound

          - name: nucypher_inbound
            protocol: Tcp
            destination_port_range: 9151
            access: Allow
            priority: 1002
            direction: Inbound

          - name: nucypher_outbound
            protocol: Tcp
            destination_port_range: 9151
            access: Allow
            priority: 3002
            direction: Outbound
      loop: "{{ vmlist }}"

    - name: Virtual Network VM NIC
      azure_rm_networkinterface:
        resource_group: "{{ resgroup }}"
        name: "{{ item.nic }}"
        virtual_network: "{{ vnet }}"
        subnet: "{{ subnet }}"
        public_ip_name: "{{ item.ip }}"
        security_group: "{{ item.nsg }}"
      loop: "{{ vmlist }}"

    - name: Create VM
      azure_rm_virtualmachine:
        resource_group: "{{ resgroup }}"
        name: "{{ item.name }}"
        vm_size: "{{ item.size }}"
        admin_username: "{{ user }}"
        ssh_password_enabled: false
        ssh_public_keys:
          - path: "/home/{{ user }}/.ssh/authorized_keys"
            key_data: "{{ osshpkey }}"
        network_interfaces: "{{ item.nic }}"
        image:
          offer: UbuntuServer
          publisher: Canonical
          sku: 18.04-LTS
          version: latest
      loop: "{{ vmlist }}"
@ -1,12 +0,0 @@
all:
  vars:
    cloudregion: "westcentralus"
    resgroup: "ncrg"
    vnet: "ncvnet"
    subnet: "ncsnet"
    osshpkey: "<open-ssh-public-key>"
    user: "azureuser"
    vmlist:
      - { name: "ncstaker", nic: "stakernic", ip: "stakerip", nsg: "stakernsg", size: "Standard_B1s" }
      - { name: "ncworker1", nic: "ncworker1nic", ip: "ncworker1ip", nsg: "ncworker1nsg", size: "Standard_B1s" }
      - { name: "ncworker2", nic: "ncworker2nic", ip: "ncworker2ip", nsg: "ncworker2nsg", size: "Standard_B1s" }
@ -1,76 +0,0 @@
- name: "Launch ETH Netstats"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_eth_netstats' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools

  tasks:

    - name: "Install System Dependencies"
      become: yes
      become_flags: "-H -S"
      apt:
        name: "{{ packages }}"
        update_cache: yes
      vars:
        packages:
          - python-pip
          - python3
          - python3-pip
          - python3-dev
          - python3-setuptools
          - libffi-dev
          - nodejs
          - npm
          - grunt

    - name: Install "grunt-cli" node.js package globally
      become: yes
      become_flags: "-H -S"
      npm:
        name: grunt-cli
        global: yes

    - git:
        repo: "https://github.com/cubedro/eth-netstats"
        dest: /home/ubuntu/code
        version: "master"

    - name: "Install JS Dependencies with NPM"
      become: yes
      become_flags: "-H -S"
      shell: npm install
      args:
        chdir: /home/ubuntu/code

    - name: "Build Application with Grunt"
      become: yes
      become_flags: "-H -S"
      shell: grunt all
      args:
        chdir: /home/ubuntu/code

    - name: "Render ETH Netstats Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/eth_netstats.j2
        dest: /etc/systemd/system/eth_netstats.service
        mode: 0755
      vars:
        port: "3000"
        secret: "{{ lookup('env', 'ETH_NETSTATS_SECRET') }}"
        prefix: "/home/ubuntu/code"

    - name: "Enable and Start ETH Netstats Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "eth_netstats"
@ -1,6 +0,0 @@
enode://bf150c793f378775e8cf09bee4fba37ea65363fe7a41171790a80ef6462de619cad2c05f42fc58655ad317503d5da8fee898e911fdf386ac6d15da12b5e883eb@3.92.166.78:30301
enode://13da3c4b5b1ca32dfb0fcd662b9c69daf6b564e6f791ddae107d57049f25952aac329de336fd393f5b42b6aa2bbb263d7aa5c426b473be611739795aa18b0212@54.173.27.77:30303
enode://4f7a27820107c235bb0f8086ee1c2bad62174450ec2eec12cb29e3fa7ecb9f332710373c1d11a3115aa72f2dabbae27b73eac51f06d3df558dd9fb51007da653@52.91.112.249:30303
enode://6b58a9437aa88f254b75110019c54807cf1d7da9729f2c022a2463bae86b639288909fe00ffac0599e616676eea2de3c503bacaf4be835a02195bea0b349ca80@54.88.246.77:30303
enode://562051180eca42514e44b4428ed20a3cb626654631f53bbfa549de7d3b7e418376e8f784c232429d7ff01bd0597e3ce7327699bb574d39ac3b2ac1729ed0dd44@54.224.110.32:30303
enode://d372b6a4ebd63a39d55cb9a50fc3c8a95ef0cbb1921da20c7e1de3dbf94a5f82969fe6396140e89bf73b792af291c91446ea8851fe0aae6847e934d8a52b22a4@34.226.198.231:30303
@ -1,20 +0,0 @@
{
  "coinbase" : "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1",
  "difficulty" : "10000",
  "extraData" : "0x",
  "gasLimit" : "8000000",
  "nonce" : "0x0112358132134550",
  "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp" : "0x00",
  "alloc": {
    "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1": {"balance": "100000000000000000000000"}
  },
  "config": {
    "chainId": 112358,
    "homesteadBlock": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0
  }
}
@ -1,52 +0,0 @@
---

- hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_bootnodes' }}"
  name: "Gather Bootnode Facts"
  user: ubuntu
  tasks: [ ]

- name: "Initialize blockchain database"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_miners' }}"
  user: ubuntu
  gather_facts: false
  tasks:

    - name: "Render Genesis Configuration"
      become: yes
      become_flags: "-H -S"
      template:
        src: ./files/genesis.j2
        dest: /home/ubuntu/genesis.json
        mode: 0755

    - name: "Create Custom Blockchain Data Directory"
      file:
        path: /home/ubuntu/chaindata
        state: directory
        mode: 0755

    - name: "Learn About Existing Accounts"
      shell: geth account list --datadir {{ datadir }}
      register: geth_accounts
      vars:
        datadir: "/home/ubuntu/chaindata"

    - name: "Generate Geth Password"
      shell: head -c 32 /dev/urandom | sha256sum | awk '{print $1}'
      register: geth_password

    - name: "Create Geth Account"
      become: yes
      become_flags: "-H -S"
      shell: geth account new --datadir /home/ubuntu/chaindata --password ./password.txt
      register: new_geth_account
      when: "'Account' not in geth_accounts.stdout"

    - name: "Initialize New Blockchain"
      become: yes
      become_flags: "-H -S"
      shell: geth --datadir {{ datadir }} --networkid {{ networkid }} init {{ genesis_file }}
      vars:
        networkid: "112358"
        datadir: "/home/ubuntu/chaindata"
        genesis_file: "/home/ubuntu/genesis.json"
@ -1,80 +0,0 @@
- name: "Launch Geth Bootnodes"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_bootnodes' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools

  tasks:

    - name: "Register Ethereum PPA"
      become: yes
      become_flags: "-H -S"
      apt_repository:
        repo: 'ppa:ethereum/ethereum'
        state: present

    - name: "Install System Dependencies"
      become: yes
      become_flags: "-H -S"
      apt:
        name: "{{ packages }}"
        update_cache: yes
      vars:
        packages:
          - python-pip
          - python3
          - python3-pip
          - python3-dev
          - python3-setuptools
          - libffi-dev
          - software-properties-common
          - ethereum

    - name: "Generate Bootnode Keys"
      become: yes
      become_flags: "-H -S"
      shell: bootnode --genkey /home/ubuntu/bootnode.key --verbosity 6

    - name: "Register Geth Bootnode Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/geth_bootnode.j2
        dest: /etc/systemd/system/geth_bootnode.service
        mode: 0755

    - name: "Register Bootnode Public Key"
      become: yes
      become_flags: "-H -S"
      shell: bootnode --nodekey /home/ubuntu/bootnode.key --writeaddress
      register: bootnode_public_key

    - name: "Locally Save Bootnode Metadata"
      local_action:
        module: lineinfile
        dest: ./files/bootnodes.txt
        create: yes
        line: enode://{{ bootnode_public_key.stdout }}@{{ inventory_hostname }}:{{ bootnode_port }}
      vars:
        bootnode_port: "30301"

    - name: "Render Geth Bootnode Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/geth_bootnode.j2
        dest: /etc/systemd/system/geth_bootnode.service
        mode: 0755

    - name: "Enable and Start Geth Bootnode Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "geth_bootnode"
@ -1,77 +0,0 @@
---

- hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_bootnodes' }}"
  name: "Gather Bootnode Facts"
  user: ubuntu
  tasks: [ ]

- name: "Launch Geth Nodes"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_miners' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools

  tasks:

    - name: "Register Ethereum PPA"
      become: yes
      become_flags: "-H -S"
      apt_repository:
        repo: 'ppa:ethereum/ethereum'
        state: present

    - name: "Install System Dependencies"
      become: yes
      become_flags: "-H -S"
      apt:
        name: "{{ packages }}"
        update_cache: yes
      vars:
        packages:
          - python-pip
          - python3
          - python3-pip
          - python3-dev
          - python3-setuptools
          - libffi-dev
          - software-properties-common
          - ethereum
          - npm

    - name: Install "PM2" node.js package globally
      become: yes
      become_flags: "-H -S"
      npm:
        name: pm2
        global: yes

    - name: "Render Geth Node Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/geth_miner.j2
        dest: /etc/systemd/system/geth_miner.service
        mode: 0755
      vars:
        datadir: "/home/ubuntu/chaindata"
        networkid: "112358"
        rpchost: "localhost"
        syncmode: "full"
        nickname: "NuCypher-Testnet-{{ hostvars[inventory_hostname].ec2_tag_Name }}"
        eth_netstats_secret: "{{ lookup('env', 'ETH_NETSTATS_SECRET') }}"
        eth_netstats_ip: "{{ hostvars[groups['tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_eth_netstats'][0]].ansible_host }}"
        eth_netstats_port: "3000"
        bootnode_uri: "{{ lookup('file', './files/bootnodes.txt') }}"

    - name: "Enable and Start Geth Node Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "geth_miner"
@ -1,42 +0,0 @@
---

- name: "Reset Geth Nodes"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_miners' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools

  tasks:
    - name: "Stop Geth Node Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: stopped
        name: "geth_miner"

    - name: "Destroy Custom Blockchain Data Directory"
      become: yes
      become_flags: "-H -S"
      file:
        path: /home/ubuntu/chaindata
        state: absent
        mode: 0755

    - name: "Destroy Standard Blockchain Data Directory"
      become: yes
      become_flags: "-H -S"
      file:
        path: /home/.ethereum/geth/chaindata
        state: absent
        mode: 0755

    - name: "Destroy DAG"
      become: yes
      become_flags: "-H -S"
      shell: "rm -rf /root/.ethash"
@ -1,222 +0,0 @@
# Ansible EC2 external inventory script settings
#

[ec2]

# Remote Host Python Interpreter
ansible_python_interpreter=/usr/bin/python3

# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org

# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
# provide the 'regions_exclude' option. If this is set to 'auto', the AWS_REGION or
# AWS_DEFAULT_REGION environment variable will be read to determine the region.
regions = us-west-2
regions_exclude = us-gov-west-1, cn-north-1

# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable

# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however the boto instance variables hold precedence
# in the event of a collision.
destination_variable = public_dns_name

# This allows you to override the inventory_name with an ec2 variable, instead
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
#hostname_variable = tag_Name

# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until you set:
# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address

# The following two settings allow flexible ansible host naming based on a
# python format string and a comma-separated list of ec2 tags. Note that:
#
# 1) If the tags referenced are not present for some instances, empty strings
#    will be substituted in the format string.
# 2) This overrides both destination_variable and vpc_destination_variable.
#
#destination_format = {0}.{1}.example.com
#destination_format_tags = Name,environment

# To tag instances on EC2 with the resource records that point to them from
# Route53, set 'route53' to True.
route53 = False

# To use Route53 records as the inventory hostnames, uncomment and set
# to equal the domain name you wish to use. You must also have 'route53' (above)
# set to True.
# route53_hostnames = .example.com

# To exclude RDS instances from the inventory, uncomment and set to False.
#rds = False

# To exclude ElastiCache instances from the inventory, uncomment and set to False.
#elasticache = False

# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com

# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False

# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped

# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False

# Include RDS cluster information (Aurora etc.)
include_rds_clusters = False

# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False

# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300

# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False

# Replace dashes in tags when creating groups, to avoid issues with Ansible
replace_dash_in_groups = True

# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = False

# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
group_by_region = True
group_by_availability_zone = True
group_by_aws_account = False
group_by_ami_id = True
group_by_instance_type = True
group_by_instance_state = False
group_by_platform = True
group_by_key_pair = True
group_by_vpc_id = True
group_by_security_group = True
group_by_tag_keys = True
group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
group_by_elasticache_engine = True
group_by_elasticache_cluster = True
group_by_elasticache_parameter_group = True
group_by_elasticache_replication_group = True

# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*

# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*

# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '='; to list multiple filters, use
# a list separated by commas. To "AND" criteria together, use "&". Note that
# the "AND" is not useful along with stack_filters, and so such usage is not allowed.
# See examples below.

# If you want to apply multiple filters simultaneously, set stack_filters to
# True. Default behaviour is to combine the results of all filters. Stacking
# allows the use of multiple conditions to filter down, for example by
# environment and type of host.
stack_filters = False

# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging

# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers

# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging

# You can also use wildcards in filter values. The below will list instances
# whose tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*

# Retrieve only instances of type t1.micro that also have tag env=stage
# instance_filters = instance-type=t1.micro&tag:env=stage

# Retrieve instances of type t1.micro AND tag env=stage, as well as any instances
# that are of type m3.large, regardless of env tag
# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large

# An IAM role can be assumed, so all requests are run as that role.
# This can be useful for connecting across different accounts, or to limit user
# access
# iam_role = role-arn

# A boto configuration profile may be used to separate out credentials
# see https://boto.readthedocs.io/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name


[credentials]

# The AWS credentials can optionally be specified here. Credentials specified
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
# AWS_PROFILE is set, or if the boto_profile property above is set.
#
# Supplying AWS credentials here is not recommended, as it introduces
# non-trivial security concerns. When going down this route, please make sure
# to set access permissions for this file correctly, e.g. handle it the same
# way as you would a private SSH key.
#
# Unlike the boto and AWS configure files, this section does not support
# profiles.
#
# aws_access_key_id = AXXXXXXXXXXXXXX
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
File diff suppressed because it is too large.
@ -1,76 +0,0 @@
- name: "Start Moes"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_moes' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"

    - include_vars:
        file: "{{ networks_filepath }}"
        name: networks

  tasks:
    - name: "Install System Dependencies"
      become: yes
      become_flags: "-H -S"
      apt:
        name: "{{ packages }}"
        update_cache: yes
        state: latest
      vars:
        packages:
          - libffi-dev
          - python3
          - python3-pip
          - python3-dev
          - python3-setuptools
          - python3-virtualenv
          - virtualenv

    - git:
        repo: "{{ git_repo }}"
        dest: ./code
        version: "{{ git_version }}"

    - pip:
        chdir: ./code
        name: '.'
        editable: true
        virtualenv: '/home/ubuntu/venv'
        virtualenv_python: python3.6
        virtualenv_site_packages: true
      environment:
        LC_ALL: en_US.UTF-8
        LANG: en_US.UTF-8

    - name: "Open Moe HTTP Port"
      become: yes
      become_flags: "-H -S"
      shell: 'iptables -A INPUT -p tcp -m conntrack --dport {{ moe_http_port }} --ctstate NEW,ESTABLISHED -j ACCEPT'
      vars:
        moe_http_port: 12500

    - name: "Render Moe's Node Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/moe_monitor.j2
        dest: /etc/systemd/system/moe_monitor.service
        mode: 0755
      vars:
        virtualenv_path: '/home/ubuntu/venv'
        nucypher_network_domain: "{{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
        teacher_uri: "{{ networks[lookup('env', 'NUCYPHER_NETWORK_NAME')][0] }}"

    - name: "Enable and Start Moe's Monitoring"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "moe_monitor"
@ -1,52 +0,0 @@
- name: "Run Monitor"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:
    - name: Clone Monitor Repo
      become: yes
      become_user: nucypher
      ansible.builtin.git:
        repo: https://github.com/nucypher/nucypher-monitor.git
        dest: /home/nucypher/nucypher-monitor
        recursive: yes
        force: yes

    - name: setup certs dir
      become: yes
      ansible.builtin.file:
        path: "{{ NUCYPHER_LETSENCRYPT_DIR }}"
        state: directory
        owner: nucypher
        group: nucypher
        mode: '0700'

    # - name: Clean any existing Influx DB directory
    #   ansible.builtin.file:
    #     path: /home/nucypher/influxdb
    #     state: absent

    - name: Copy SSL Certs
      become: yes
      ansible.builtin.copy:
        src: "{{ LOCAL_CERTS_DIR }}/{{ item }}"
        dest: "{{ NUCYPHER_LETSENCRYPT_DIR }}/{{ item }}"
        owner: nucypher
        group: nucypher
        mode: '0700'
      loop:
        - cert.pem
        - chain.pem
        - fullchain.pem
        - privkey.pem

    - name: Run Monitor (docker-compose up)
      become: yes
      become_user: nucypher
      ansible.builtin.command: docker-compose -f /home/nucypher/nucypher-monitor/deploy/docker-compose.yml up -d --build
      args:
        chdir: /home/nucypher/
      environment:
        WEB3_PROVIDER_URI: "{{ WEB3_PROVIDER_URI }}"
        NUCYPHER_NETWORK: "{{ NUCYPHER_NETWORK }}"
        NUCYPHER_LETSENCRYPT_DIR: "{{ NUCYPHER_LETSENCRYPT_DIR }}"
@ -1,7 +0,0 @@
- name: "Setup Remote Geth"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"

- import_playbook: ../worker/include/setup_user.yml
- import_playbook: ../worker/include/setup_docker.yml
- import_playbook: include/run_monitor.yml
@ -1,45 +0,0 @@
- hosts: localhost
  connection: local
  gather_facts: false
  user: ubuntu
  pre_tasks:
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"
  vars:
    nucypher_network_name: "{{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
    inventory_group: "{{ nucypher_network_name }}-{{ nucypher_role }}"
  tasks:
    - name: "Provision {{ ec2_count }} instances in the {{ ec2_region }} region ({{ inventory_group }})"
      local_action:
        module: ec2
        key_name: "{{ ec2_keypair }}"
        group_id: "{{ ec2_security_group_id }}"
        instance_type: "{{ ec2_instance_type }}"
        image: "{{ ec2_image }}"
        vpc_subnet_id: "{{ ec2_subnet_id }}"
        region: "{{ ec2_region }}"
        instance_tags: '{"Type":"{{ ec2_instance_type }}", "Role":"{{ inventory_group }}"}'
        assign_public_ip: yes
        wait: true
        exact_count: "{{ ec2_count }}"
        count_tag:
          Role: "{{ inventory_group }}"
        volumes:
          - device_name: /dev/xvda
            volume_type: gp2
            volume_size: "{{ ec2_volume_size }}"
            delete_on_termination: true
      register: nucypher_fleet

    - name: "Add Provisioned Servers as Hosts"
      add_host:
        name: "{{ item.public_ip }}"
        groups: "{{ inventory_group }}"
        ec2_region: "{{ ec2_region }}"
        ec2_tag_Type: "{{ ec2_tag_Type }}"
        ec2_tag_Role: "{{ inventory_group }}"
        ec2_ip_address: "{{ item.public_ip }}"
      with_items: "{{ nucypher_fleet.instances }}"

    - name: Wait for the instances to boot by checking the ssh port
      wait_for: host={{ item.public_ip }} port=22 delay=15 timeout=300 state=started
      with_items: "{{ nucypher_fleet.instances }}"
@ -1,32 +0,0 @@
- hosts: localhost
  connection: local
  gather_facts: false
  user: root
  pre_tasks:
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"
  vars:
    nucypher_network_name: "{{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
    inventory_group: "{{ nucypher_network_name }}-{{ nucypher_role }}"
  tasks:
    - name: Get EC2 instance IDs for {{ nucypher_network_name }}
      run_once: true
      ec2_remote_facts:
        filters:
          "tag:Type": "{{ ec2_tag_Type }}"
          "tag:Role": "{{ inventory_group }}"
        region: "{{ ec2_region }}"
      register: instances

    - name: display instances
      run_once: true
      debug:
        var: instances

    - name: Remove registered instances
      run_once: true
      ec2:
        state: absent
        wait: true
        instance_ids: "{{ instances | json_query('instances[*].id') }}"
        region: "{{ ec2_region }}"
      when: instances
@ -1,17 +0,0 @@
- name: "Rename Ursula configuration files"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_ursulas' }}"
  user: ubuntu
  gather_facts: false
  vars:
    old: ~/.local/share/nucypher/Ursula.config
    new: ~/.local/share/nucypher/ursula.config
  pre_tasks:
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"
  tasks:
    - name: Check for existing configuration file
      stat: path={{ old }}
      register: configuration_file

    - name: Rename Configuration File (if existing)
      command: mv {{ old }} {{ new }}
      when: configuration_file.stat.exists
@ -1,14 +0,0 @@
- name: "Restart Ursulas"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_ursulas' }}"
  user: ubuntu
  gather_facts: false
  tasks:
    - name: "Restart Ursula Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "ursula_node"
@ -1,117 +0,0 @@
- name: "Start Ursulas"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_ursulas' }}"
  user: ubuntu
  gather_facts: false

  pre_tasks:
    - name: "Install Python2.7 for Ansible Control"
      raw: sudo apt -y update && sudo apt install -y python2.7-minimal python2.7-setuptools
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"

  tasks:
    - name: "Install System Dependencies"
      become: yes
      become_flags: "-H -S"
      apt:
        name: "{{ packages }}"
        update_cache: yes
        state: latest
      vars:
        packages:
          - libffi-dev
          - python3
          - python3-pip
          - python3-dev
          - python3-setuptools
          - python3-virtualenv
          - virtualenv

    - git:
        repo: "{{ git_repo }}"
        dest: ./code
        version: "{{ git_version }}"

    - pip:
        chdir: ./code
        name: '.'
        editable: true
        virtualenv: '/home/ubuntu/venv'
        virtualenv_python: python3.6
        virtualenv_site_packages: true
      environment:
        LC_ALL: en_US.UTF-8
        LANG: en_US.UTF-8

    - name: "Check if 'ursula.config' Exists"
      stat:
        path: "~/.local/share/nucypher/ursula.config"
      register: stat_result

    - name: "Generate Ursula Password"
      shell: head -c 32 /dev/urandom | sha256sum | awk '{print $1}'
      register: ursula_password
      when: stat_result.stat.exists == False

    - name: "Configure Ursula"
      shell: "/home/ubuntu/venv/bin/nucypher ursula init --federated-only --rest-host {{ inventory_hostname }} --network {{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
      args:
        chdir: ./code
      environment:
        NUCYPHER_KEYSTORE_PASSWORD: "{{ ursula_password.stdout }}"
        LC_ALL: en_US.UTF-8
        LANG: en_US.UTF-8
      ignore_errors: yes
      register: configure_ursula_output
      when: stat_result.stat.exists == False

    - name: "Get Ursula Seed Node Config (and more)"
      slurp:
        src: "~/.local/share/nucypher/ursula.config"
      register: ursula_seed_node_config
      run_once: true

    - name: "Set Ursula Seed Node Fact"
      set_fact:
        seed_node_metadata: "{{ ursula_seed_node_config['content'] | b64decode }}"

    - name: "Open Ursula node port"
      become: yes
      become_flags: "-H -S"
      shell: 'iptables -A INPUT -p tcp -m conntrack --dport {{ seed_node_metadata.rest_port }} --ctstate NEW,ESTABLISHED -j ACCEPT'

    - name: "Render Lonely Node Service"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/firstula_node.j2
        dest: /etc/systemd/system/ursula_node.service
        mode: 0755
      vars:
        virtualenv_path: '/home/ubuntu/venv'
        nucypher_network_domain: "{{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
      run_once: true
      when: stat_result.stat.exists == False

    - name: "Render Subsequent Ursula Node Services"
      become: yes
      become_flags: "-H -S"
      template:
        src: ../../services/ursula_node.j2
        dest: /etc/systemd/system/ursula_node.service
        mode: 0755
      vars:
        virtualenv_path: '/home/ubuntu/venv'
        nucypher_network_domain: "{{ lookup('env', 'NUCYPHER_NETWORK_NAME') }}"
      when:
        - stat_result.stat.exists == False
        - inventory_hostname != seed_node_metadata.rest_host

    - name: "Enable and Start Ursula Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "ursula_node"
@ -1,35 +0,0 @@
- name: "Update Application Code on Seednodes"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_ursulas' }}"
  user: ubuntu
  gather_facts: false
  pre_tasks:
    - include_vars: "{{ lookup('env', 'ANSIBLE_VARIABLES') }}"
  tasks:

    - git:
        repo: "{{ git_repo }}"
        version: "{{ git_version }}"
        dest: ./code
        clone: no
        update: yes

    - pip:
        chdir: ./code
        name: '.'
        editable: true
        executable: pip3
      environment:
        LC_ALL: en_US.UTF-8
        LANG: en_US.UTF-8
      become: yes
      become_flags: "-H -S"

    - name: "Restart Ursula Service"
      become: yes
      become_flags: "-H -S"
      systemd:
        daemon_reload: yes
        no_block: yes
        enabled: yes
        state: restarted
        name: "ursula_node"
@ -1,17 +0,0 @@
- name: "Destroy Ursula Configuration"
  hosts: "{{ 'tag_Role_' + lookup('env', 'NUCYPHER_NETWORK_NAME') + '_ursulas' }}"
  user: ubuntu
  gather_facts: false

  tasks:
    - name: "Delete NuCypher Configuration Files and Directories (Root)"
      file:
        state: absent
        path: "~/.local/share/nucypher/"
      become: yes
      become_flags: "-H -S"

    - name: "Delete NuCypher Configuration Files and Directories (User)"
      file:
        state: absent
        path: "~/.local/share/nucypher/"
@ -1,17 +0,0 @@
#
# Nucypher Ansible Variables (EC2 Provisioning) Template
#
STATE_RES_NAME: "${STATE_RES_NAME}"
ec2_region: "${EC2_REGION}"
ec2_instance_type: "${EC2_INSTANCE_TYPE}"
ec2_image: "${EC2_IMAGE_ID}"
ec2_keypair: "${EC2_KEYPAIR_NAME}"
ec2_volume_size: "${EC2_VOLUME_SIZE_GB}"
ec2_count: '${EC2_PROVISION_QUANTITY}'
ec2_security_group_id: "${EC2_SECURITY_GROUP_ID}"
ec2_subnet_id: "${EC2_SUBNET_ID}"
ec2_tag_Type: "${EC2_TAG_TYPE}"
aws_pem_path: "${AWS_PEM_PATH}"
git_repo: "${GIT_REPOSITORY_URL}"
git_version: "${GIT_VERSION}"
nucypher_role: 'seednodes'
@ -1,26 +0,0 @@
#### single command line worker deployment

1. Provision Ubuntu hosts that are accessible from the internet and that you can ssh into.
    * if you need to use a .pem file, use the "amazon" example (in /nucypher/deploy/ansible/worker/inventory.yml)
2. If you would like to pre-create your worker accounts, use a locally running geth instance to create accounts for the workers you'll be deploying (you will need the keystores); otherwise, worker accounts can be created automatically as the nodes are deployed (a sketch of pre-creating an account follows this list).
3. Follow the staking instructions here: https://docs.nucypher.com/en/latest/guides/network_node/staking_guide.html
4. Modify the contents of [inventory.yml](inventory.yml) to add your worker addresses, staker addresses, and passwords, as well as the addresses of your host(s), and save it somewhere.
5. Ensure that you have installed nucypher with development tools: `pip install -r dev-requirements.txt`
6. From /nucypher/deploy/ansible run `ansible-playbook worker/setup_remote_workers.yml -i ~/my-gemini-nodes.yml`
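
For step 2, a minimal sketch of pre-creating one worker account with a local geth (illustrative, not from the original README; the keystore path shown is geth's default on Linux):

```console
$ geth account new --keystore ~/.ethereum/keystore
```

The command prints the new account's address, and the generated keystore file is what you will reference for that worker.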

#### single command line worker UPDATE

Updates all of your existing nodes to the latest nucypher docker image.

1. From `/nucypher/deploy/ansible/` run `ansible-playbook worker/update_remote_workers.yml -i ~/my-gemini-nodes.yml`


#### other commands to try

* `ansible-playbook worker/get_workers_status.yml -i ~/my-gemini-nodes.yml`
    * prints out some useful information about your nodes
@ -1,5 +0,0 @@
- name: "Backup Remote Worker Data"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"

- import_playbook: include/backup_ursula_data.yml
@ -1,5 +0,0 @@
- name: "Get All Worker Status"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"

- import_playbook: include/check_running_ursula.yml
@ -1,20 +0,0 @@
all:
  children:
    nucypher:
      children:
        mainnet:
          children:
            nodes:
              vars:
                network_name: "mainnet"
                geth_options: "--mainnet"
                geth_dir: '/home/nucypher/geth/.ethereum/mainnet/'
                geth_container_geth_datadir: "/root/.ethereum/mainnet"
                nucypher_container_geth_datadir: "/root/.local/share/geth/.ethereum/mainnet"
                etherscan_domain: mainnet.etherscan.io
                ansible_python_interpreter: /usr/bin/python3
                ansible_connection: ssh
                ansible_ssh_private_key_file: <PEM FILE HERE>
              hosts:
                <IP ADDRESS HERE>:
                  default_user: ubuntu
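Once the `<PEM FILE HERE>` and `<IP ADDRESS HERE>` placeholders are filled in, this inventory plugs straight into the worker playbooks from the README above (a usage sketch; the filename is whatever you saved the inventory as):

```console
$ ansible-playbook worker/setup_remote_workers.yml -i inventory.yml
```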
@ -1,55 +0,0 @@
- name: "Create local backup of remote Ursula data"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:

    - name: find keystore files
      find:
        paths: "{{ geth_dir }}keystore"
      register: keystore_files

    - name: find Ursula keystore
      become: yes
      find:
        paths: /home/nucypher/nucypher/keystore/
      register: keystore

    - name: find Ursula database files
      find:
        paths: /home/nucypher/nucypher/ursula.db
      register: database_files

    - name: "Backup Worker Nucypher Keystore locally to: {{ deployer_config_path }}/remote_worker_backups/"
      become: yes
      become_user: nucypher
      fetch:
        src: "{{ item.path }}"
        dest: "{{ deployer_config_path }}/remote_worker_backups/"
      with_items: "{{ keystore_files.files }}"

    - name: "Backup remote worker config files: {{ deployer_config_path }}/remote_worker_backups/"
      become: yes
      become_user: nucypher
      fetch:
        src: "{{ item }}"
        dest: "{{ deployer_config_path }}/remote_worker_backups/"
      with_items:
        - "/home/nucypher/nucypher/ursula.json"
        - "{{ geth_dir }}account.txt"

    - name: "Backup NuCypher Keystores locally to: {{ deployer_config_path }}/remote_worker_backups/"
      become: yes
      # become_user: nucypher
      fetch:
        src: "{{ item.path }}"
        dest: "{{ deployer_config_path }}/remote_worker_backups/"
      with_items: "{{ keystore.files }}"

    - name: "Backup ursula.db to: {{ deployer_config_path }}/remote_worker_backups/"
      become: yes
      # become_user: nucypher
      fetch:
        src: "{{ item.path }}"
        dest: "{{ deployer_config_path }}/remote_worker_backups/"
      with_items: "{{ database_files.files }}"
@ -1,67 +0,0 @@
- name: "Ursula Status"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:

    - name: Get public ip
      uri:
        url: http://ifconfig.me/ip
        return_content: yes
      register: ip_response

    - name: "Get LogPath"
      become: yes
      shell:
        cmd: docker ps --no-trunc | grep ursula | cut -f 1 -d " "
      register: ursula_container_name

    - name: "Wait for Ursula Log"
      become: yes
      lineinfile:
        dest: "/var/lib/docker/containers/{{ ursula_container_name['stdout'] }}/{{ ursula_container_name['stdout'] }}-json.log"
        line: "Working ~ Keep Ursula Online!"
      check_mode: yes
      register: serving

    - name: "Read Ursula Log"
      become: yes
      command: docker logs ursula
      register: ursula_logs

    - name: "Get Current running Image"
      become: yes
      command: sudo docker ps --no-trunc --format \"\{\{.Image\}\}\"
      register: running_docker_image

    - name: "Get Current running Command"
      become: yes
      command: sudo docker ps --no-trunc --format \"\{\{.Command\}\}\"
      register: running_docker_command

    - name: "Request Ursula Status"
      become: yes
      uri:
        url: "https://{{ ip_response.content }}:9151/status/?json=true"
        validate_certs: no
      register: status_data
      ignore_errors: yes
      when: serving

    - name: Print Ursula Status Data
      ignore_errors: no
      debug:
        msg:
          "local nickname: {{ host_nickname }}\n
          {% if serving and 'json' in status_data %}nickname: {{ status_data.json.nickname.text }}\n
          staker address: {{ status_data.json.staker_address }}\n
          worker address: {{ status_data.json.worker_address }}\n
          rest url: https://{{ status_data.json.rest_url }}\n
          \tversion: {{ status_data.json.version }}\n
          \tmissing commitments: {{ status_data.json.missing_commitments }}\n
          \tlast committed period: {{ status_data.json.last_committed_period }}\n
          \tETH: {{ status_data.json.balance_eth }}\n{% endif %}
          \tprovider: {{ blockchain_provider }}\n
          \tursula docker image: {{ running_docker_image.stdout }}\n
          \tursula command: {{ running_docker_command.stdout }}\n
          \tlast log line: {{ ursula_logs['stdout_lines'][-1] }}\n"
@ -1,76 +0,0 @@
- name: "Setup Nucypher"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:

    - name: Recursively change ownership of geth directory
      become: yes
      file:
        path: /home/nucypher/geth
        state: directory
        recurse: yes
        owner: nucypher

    - name: Stop any running Ursulas
      become: yes
      become_user: nucypher
      docker_container:
        name: ursula
        state: stopped
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
      ignore_errors: yes

    - name: Keep disk space clean by pruning unneeded docker debris
      become: yes
      docker_prune:
        containers: yes
        images: yes
        images_filters:
          dangling: false
        networks: yes
        volumes: yes
        builder_cache: yes

    - name: "pull {{ nucypher_image | default('nucypher/nucypher:latest') }}"
      become: yes
      docker_image:
        name: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        source: pull
        force_source: yes

    - name: "check if /home/nucypher/nucypher/ursula.json exists"
      become: yes
      become_user: nucypher
      stat:
        path: /home/nucypher/nucypher/ursula.json
      register: ursula_check

    - name: find keystore file
      find:
        paths: "{{ geth_dir }}keystore"
      register: keystore_output

    - name: store signer options
      set_fact:
        signer_options: "--signer keystore://{{ nucypher_container_geth_datadir }}/keystore/{{ keystore_output.files[0].path | basename }}"
      when: node_is_decentralized is undefined or not node_is_decentralized and ursula_check.stat.exists == False

    - name: store empty signer options
      set_fact:
        signer_options: ""
      when: node_is_decentralized is not undefined and node_is_decentralized and ursula_check.stat.exists == False

    - name: Find my public ip
      uri:
        url: http://ifconfig.me/ip
        return_content: yes
      register: ip_response

    - name: "init Ursula worker"
      become: yes
      become_user: nucypher
      when: ursula_check.stat.exists == False
      command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYSTORE_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula init --eth-provider {{ blockchain_provider }} --worker-address {{ active_account.stdout }} --rest-host {{ ip_response.content }} --network {{ network_name }} {{ nucypher_ursula_init_options | default('') }} {{ signer_options }}"
      environment:
        NUCYPHER_KEYSTORE_PASSWORD: "{{ runtime_envvars['NUCYPHER_KEYSTORE_PASSWORD'] }}"
@ -1,100 +0,0 @@
- name: "Setup Ethereum"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:
    - name: "remove existing nucypher config data"
      become: yes
      file:
        path: /home/nucypher/nucypher/
        state: absent
      when: wipe_nucypher_config is not undefined and wipe_nucypher_config

    - name: "create geth keystore directory"
      become: yes
      file:
        path: "{{ geth_dir }}keystore"
        state: directory
        mode: '0755'

    - name: "copy local geth account"
      become: yes
      copy:
        src: "{{ WORKER_ACCT_KEYSTORE_PATH }}"
        dest: "{{ geth_dir }}keystore"
        owner: nucypher
      when: WORKER_ACCT_KEYSTORE_PATH is not undefined

    - name: "check for existing account"
      become: yes
      stat:
        path: "{{ geth_dir }}account.txt"
      register: account_file

    - name: "echo account_file_exists"
      debug:
        verbosity: 0
        var: account_file.stat.exists

    - name: "get account address from file"
      become: yes
      command: 'cat {{ geth_dir }}account.txt'
      register: remotely_created_account
      when: account_file.stat.exists and NUCYPHER_WORKER_ADDRESS is undefined

    - name: "create password file"
      become: yes
      copy:
        content: "{{ runtime_envvars['NUCYPHER_WORKER_ETH_PASSWORD'] }}"
        dest: "/home/nucypher/geth/password.txt"
        owner: nucypher
      when: WORKER_ACCT_KEYSTORE_PATH is undefined and not account_file.stat.exists

    - name: "echo nucypher_container_geth_datadir"
      debug:
        verbosity: 0
        var: nucypher_container_geth_datadir

    # create a local geth account if one doesn't exist
    - name: "create new account and capture the address"
      become: yes
      shell:
        cmd: 'docker run -v /home/nucypher/geth:/root ethereum/client-go:latest account new --password /root/password.txt --datadir {{ geth_container_geth_datadir }} | grep "Public address of the key:" | cut -d":" -f2- | xargs'
      register: new_geth_account_checksum
      when: not account_file.stat.exists

    # now remove the password.txt needed in the previous step
    - name: "ensure password file is deleted"
      become: yes
      when: new_geth_account_checksum is not undefined
      file:
        path: "/home/nucypher/geth/password.txt"
        state: absent

    - name: "write new address to file for later use"
      become: yes
      when: not account_file.stat.exists and new_geth_account_checksum is not undefined
      copy:
        content: "{{ new_geth_account_checksum.stdout }}"
        dest: "{{ geth_dir }}account.txt"

    - name: "write declared worker address to file if it exists"
      become: yes
      when: NUCYPHER_WORKER_ADDRESS is not undefined
      copy:
        content: "{{ NUCYPHER_WORKER_ADDRESS }}"
        dest: "{{ geth_dir }}account.txt"

    - name: "get account address from file"
      become: yes
      command: 'cat {{ geth_dir }}account.txt'
      register: active_account

    - name: "echo worker address"
      debug:
        verbosity: 0
        msg: "{{ inventory_hostname }}:worker address:{{ active_account.stdout }}"

    - name: store worker address
      set_fact:
        worker_address: "{{ active_account.stdout }}"
@ -1,17 +0,0 @@
- name: "Setup Ethereum"
  hosts: "{{ play_hosts }}"
  remote_user: "{{ default_user }}"
  gather_facts: no
  tasks:
    - name: "create geth directory"
      become: yes
      file:
        path: /home/nucypher/geth/
        state: directory
        mode: '0755'

    - name: "pull ethereum/client-go:stable"
      become: yes
      docker_image:
        name: ethereum/client-go:stable
        source: pull
@ -1,22 +0,0 @@
- name: "Ursula Status"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: "Get LogPath"
      become: yes
      shell:
        cmd: docker ps --no-trunc | grep ursula | cut -f 1 -d " "
      register: ursula_container_name

    - name: read log file
      become: yes
      shell:
        cmd: cat "/var/lib/docker/containers/{{ursula_container_name['stdout']}}/{{ursula_container_name['stdout']}}-json.log"
      register: log_output

    - name: Print Ursula Log
      debug:
        msg: "{{log_output.stdout}}"
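If you are checking a node interactively rather than through this play, `docker inspect` can resolve the JSON log path in one step; a sketch assuming the container is named `ursula` as in the run playbooks:

```bash
# Ask Docker for the container's JSON log file directly.
LOG_PATH=$(sudo docker inspect --format '{{.LogPath}}' ursula)
sudo tail -n 50 "$LOG_PATH"
```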
@ -1,22 +0,0 @@
- name: "Run shared externally available geth node"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: "run geth {{geth_options}} forever in the background"
      become: yes
      docker_container:
        name: geth
        state: started
        restart: yes
        pull: true
        image: ethereum/client-go:stable
        restart_policy: "unless-stopped"
        # --http.vhosts replaces the deprecated --rpcvhosts spelling used by older geth releases
        command: "{{geth_options}} --http --http.addr 0.0.0.0 --http.api eth,web3,net --nousb --syncmode fast --http.vhosts=* --cache 2000"
        volumes:
          - /home/nucypher/geth:/root
        ports:
          - "8545:8545/tcp"
          - "30303:30303"
          - "8546:8546/tcp"
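Once this play completes, the node answers JSON-RPC on host port 8545. A quick smoke test with a standard geth RPC method (replace the hostname):

```bash
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}' \
  http://your-geth-host:8545
# expected shape: {"jsonrpc":"2.0","id":1,"result":"Geth/v1.x.x-..."}
```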
@ -1,47 +0,0 @@
- name: "Sync/Run Geth if we are running a Decentralized node"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: "check if the geth.ipc exists (if it does, we skip syncing steps)"
      become: yes
      stat:
        path: "{{geth_dir}}geth.ipc"
      register: geth_running

    - name: "make sure no previously running geth syncs are still running"
      become: yes
      docker_container:
        name: geth-sync
        state: absent
      when: not geth_running.stat.exists and restarting_geth is undefined
      ignore_errors: yes

    - name: "run geth until it finishes syncing (time to get up and go for a walk)"
      become: yes
      command: "docker run --name geth-sync -v /home/nucypher/geth:/root ethereum/client-go:latest {{geth_options}} --exitwhensynced --ipcdisable"
      async: 100000
      poll: 0
      register: geth_sync_status
      when: not geth_running.stat.exists and restarting_geth is undefined

    - name: 'checking every few seconds if geth is finished syncing... (this will continue even if you kill this process)'
      become: yes
      async_status:
        jid: "{{ geth_sync_status['ansible_job_id'] }}"
      register: job_result
      until: job_result.finished
      retries: 10000
      # only poll if a background sync was actually started on this host
      when: geth_sync_status.ansible_job_id is defined

    - name: "run geth {{geth_options}} forever in the background"
      become: yes
      docker_container:
        name: geth
        state: started
        restart: yes
        image: ethereum/client-go:latest
        restart_policy: "unless-stopped"
        command: "{{geth_options}} --cache 2000"
        volumes:
          - /home/nucypher/geth:/root
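Because the initial sync is launched with `async`/`poll: 0`, it keeps running even if the Ansible session drops. To watch it by hand on the target host:

```bash
# Follow the one-shot sync container; --exitwhensynced makes it stop on its own.
sudo docker logs -f geth-sync
# Afterwards the long-running container from the final task should be up:
sudo docker ps --filter name=geth
```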
@ -1,138 +0,0 @@
- name: "Run Ursula"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: find keystore file
      find:
        paths: "{{geth_dir}}keystore"
      register: keystore_output

    - name: store signer options
      set_fact:
        signer_options: "--signer keystore://{{nucypher_container_geth_datadir}}/keystore/{{keystore_output.files[0].path | basename}}"
      when: node_is_decentralized is undefined or not node_is_decentralized

    - name: store empty signer options
      set_fact:
        signer_options: ""
      when: node_is_decentralized is defined and node_is_decentralized

    # - name: "remove known nodes"
    #   become: yes
    #   file:
    #     path: /home/nucypher/nucypher/known_nodes/
    #     state: absent

    - name: "get account address from file"
      become: yes
      command: 'cat {{geth_dir}}account.txt'
      register: active_account

    # - name: "ensure known nodes certificates directory"
    #   become: yes
    #   file:
    #     path: /home/nucypher/nucypher/known_nodes/certificates
    #     state: directory

    # - name: "ensure known nodes directory"
    #   become: yes
    #   file:
    #     path: /home/nucypher/nucypher/known_nodes/metadata
    #     state: directory

    - name: Find my public ip
      uri:
        url: http://ifconfig.me/ip
        return_content: yes
      register: ip_response

    - name: "update Ursula worker config"
      become: yes
      become_user: nucypher
      command: "docker run -v /home/nucypher:/root/.local/share/ -e NUCYPHER_KEYSTORE_PASSWORD -it {{ nucypher_image | default('nucypher/nucypher:latest') }} nucypher ursula config --eth-provider {{ blockchain_provider }} --worker-address {{active_account.stdout}} --rest-host {{ip_response.content}} --network {{network_name}} {{nucypher_ursula_init_options | default('')}} {{signer_options}} --config-file /root/.local/share/nucypher/ursula.json"
      environment: "{{runtime_envvars}}"

    - name: "Backup Worker Nucypher Keystore locally to: {{deployer_config_path}}/remote_worker_backups/"
      become: yes
      become_user: nucypher
      fetch:
        src: "{{keystore_output.files[0].path}}"
        dest: "{{deployer_config_path}}/remote_worker_backups/"

    - name: "Run Staked Ursula (seed node)"
      become: yes
      become_user: nucypher
      when: SEED_NODE_URI is defined and inventory_hostname == SEED_NODE_URI
      docker_container:
        recreate: yes
        name: ursula
        state: started
        pull: yes
        log_driver: json-file
        log_options:
          max-size: 10m
          max-file: "5"
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        restart_policy: "unless-stopped"
        command: "nucypher ursula run {{nucypher_ursula_run_options}} --lonely"
        volumes:
          - /home/nucypher:/root/.local/share/
        ports:
          - "9151:9151"
          - "9101:9101"
        env: "{{runtime_envvars}}"

    - name: "wait a few seconds for the seed node to become available"
      when: SEED_NODE_URI is defined and SEED_NODE_URI
      pause:
        seconds: 15

    - name: "Run Staked Ursula (non-seed)"
      become: yes
      become_user: nucypher
      # run as a non-seed whenever this host is not the declared seed (or no seed is declared)
      when: SEED_NODE_URI is undefined or inventory_hostname != SEED_NODE_URI
      docker_container:
        recreate: yes
        name: ursula
        state: started
        pull: yes
        log_driver: json-file
        log_options:
          max-size: 10m
          max-file: "5"
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        restart_policy: "unless-stopped"
        command: "nucypher ursula run {{nucypher_ursula_run_options}} {{teacher_options}}"
        volumes:
          - /home/nucypher:/root/.local/share/
        ports:
          - "9151:9151"
          - "9101:9101"
        env: "{{runtime_envvars}}"

    - name: "Get LogPath"
      become: yes
      shell:
        cmd: docker ps --no-trunc | grep ursula | cut -f 1 -d " "
      register: ursula_container_name

    - name: "Read Ursula Log"
      become: yes
      command: docker logs ursula
      register: ursula_logs

    - name: Print Ursula Log Output
      debug:
        msg: "{{ursula_logs['stdout']}}"

    - name: "Wait until we see that Ursula has decrypted her keystore and gotten started"
      become: yes
      ignore_errors: yes
      wait_for:
        path: "/var/lib/docker/containers/{{ursula_container_name['stdout']}}/{{ursula_container_name['stdout']}}-json.log"
        search_regex: "External IP matches configuration"
        timeout: 30
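Beyond the log check above, a running Ursula can be probed over the REST port mapped in this play. A hedged sketch: the `/status` page exists in nucypher releases of this era, and `-k` tolerates the node's self-signed certificate, but verify the route against your version:

```bash
curl -sk https://your-ursula-host:9151/status
```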
@ -1,68 +0,0 @@
- name: "Install Docker"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  become: yes
  gather_facts: no

  tasks:
    - name: Install aptitude using apt
      apt: name=aptitude state=latest update_cache=yes force_apt_get=yes

    - name: Install required system packages
      apt: name={{ item }} state=latest update_cache=yes
      loop: ['apt-transport-https', 'ca-certificates', 'curl', 'software-properties-common', 'python3-pip', 'virtualenv', 'python3-setuptools', 'acl']

    - name: Add Docker GPG apt Key
      apt_key:
        url: https://download.docker.com/linux/ubuntu/gpg
        state: present

    - name: Add Docker Repository
      apt_repository:
        repo: deb https://download.docker.com/linux/ubuntu bionic stable
        state: present

    - name: Update apt and install docker-ce
      apt: update_cache=yes name=docker-ce state=latest

    - name: Install Docker Module for Python
      pip:
        name: docker
        executable: pip3

    - name: Ensure group "docker" exists
      group:
        name: docker
        state: present

    - name: Add the nucypher user to the docker group
      user:
        name: nucypher
        group: docker

    - name: Download docker-compose
      get_url:
        url: https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
        dest: ~/docker-compose
        mode: 'u+x,g+x'

    - name: Check docker-compose exists
      stat: path=~/docker-compose
      register: docker_compose

    - name: Move docker-compose to /usr/local/bin/docker-compose
      command: mv ~/docker-compose /usr/local/bin/docker-compose
      when: docker_compose.stat.exists

    - name: chown docker-compose so nucypher user can use it
      file:
        path: /usr/local/bin/docker-compose
        group: docker

    - name: Ensure Docker has started
      service:
        name: docker
        state: started

    - name: "wait a few seconds for the docker daemon to start up (you can ctrl-C this...)"
      pause:
        seconds: 10
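A quick way to confirm the play left the host in the expected state:

```bash
docker --version
docker-compose --version
groups nucypher                # should include "docker"
sudo -u nucypher docker ps     # the nucypher user can reach the daemon
```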
@ -1,9 +0,0 @@
- name: "Setup Nucypher User"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no
  become: yes

  tasks:
    - name: "create nucypher user as {{default_user}}"
      user:
        name: nucypher
@ -1,20 +0,0 @@
- name: "Stop Geth and Ursula Containers if they are running"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: Stop Ursula
      become: yes
      docker_container:
        name: ursula
        state: stopped
        image: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
      ignore_errors: yes

    - set_fact: restarting_geth=True

    - name: Stop Geth
      become: yes
      docker_container:
        name: geth
        state: stopped
      when: node_is_decentralized is defined and node_is_decentralized
@ -1,20 +0,0 @@
- name: "Update Running Ursula"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: Keep disk space clean by pruning unneeded docker debris
      become: yes
      shell: docker system prune -af

    - name: "pull {{ nucypher_image | default('nucypher/nucypher:latest') }}"
      become: yes
      docker_image:
        name: "{{ nucypher_image | default('nucypher/nucypher:latest') }}"
        source: pull
        force_source: yes

- import_playbook: run_geth.yml
  when: node_is_decentralized is defined and node_is_decentralized
- import_playbook: run_ursula.yml
@ -1,66 +0,0 @@
all:
  children:
    nucypher:
      children:
        gemini:
          children:  # add or remove children as needed

            #### digital ocean uses the default user "root", so we have to group these separately
            digitalocean:  # this can be anything... "my_nodes" or "home"
              vars:
                default_user: "root"
              hosts:
                192.168.1.1:
                  ### By default, the worker account is auto-generated on the remote server.
                  ### Alternatively, you can generate it locally and copy it over.
                  # WORKER_ACCT_KEYSTORE_PATH: "/Users/macperson/Library/Ethereum/goerli/keystore/UTC--2020-01-21T02-15-18.405738000Z--02e8cbf55e781ad4ca331fe5274be93814d760d0"
                  # NUCYPHER_WORKER_ADDRESS: "0x02e8cbf55E781AD4cA331fe5274Be93814D760D0"
                  NUCYPHER_STAKER_ADDRESS: "0xD9b6B55b005f1B23b45a9a4aC9669deFac6dAd67"

            #### azure configures new instances with .pem keypair-based auth,
            # so we need this ansible_ssh_private_key_file variable
            azure:
              vars:
                ansible_ssh_private_key_file: ~/Downloads/damon-ansible-testing.pem
                default_user: "azureuser"  # default for azure deployments
              hosts:
                # add a host for each worker/staker
                50.22.41.3:
                  # WORKER_ACCT_KEYSTORE_PATH: "/home/ubuntu/.ethereum/goerli/keystore/UTC--2020-01-21T02-15-33.342507000Z--d9e7eC6fddde58c739CDdbAD5c38F170F1571077"
                  # NUCYPHER_WORKER_ADDRESS: "0xd9e7eC6fddde58c739CDdbAD5c38F170F1571077"
                  NUCYPHER_STAKER_ADDRESS: "0x7QkaEAe8aaee6f2C810F048877fbe1FBB2B27828"

            #### amazon configures new instances with .pem keypair-based auth,
            # so we need this ansible_ssh_private_key_file variable
            amazon:
              vars:
                ansible_ssh_private_key_file: ~/Downloads/damon-ansible-testing.pem
                default_user: "ubuntu"  # or root for digital ocean
              hosts:
                # add a host for each worker/staker
                gemini1.mystakednodez.com:
                  ### By default, the worker account is auto-generated on the remote server.
                  ### Alternatively, you can generate it locally and copy it over.
                  # WORKER_ACCT_KEYSTORE_PATH: "/home/ubuntu/.ethereum/goerli/keystore/UTC--2020-01-21T02-15-33.342507000Z--d9e7eC6f9bB558c739CDdbAD5c38F170F1571077"
                  # NUCYPHER_WORKER_ADDRESS: "0xd9e7eC6f9bB558c739CDdbAD5c38F170F1571077"
                  NUCYPHER_STAKER_ADDRESS: "0x4ffaEAe86c6A6f2C810F048877fbe1FBB2B27606"
                86.75.30.9:
                  # WORKER_ACCT_KEYSTORE_PATH: "/home/ubuntu/.ethereum/goerli/keystore/UTC--2020-01-21T02-15-33.342507000Z--d9e7eC6fddde58c739CDdbAD5c38F170F1571077"
                  # NUCYPHER_WORKER_ADDRESS: "0xd9e7eC6fddde58c739CDdbAD5c38F170F1571077"
                  NUCYPHER_STAKER_ADDRESS: "0x4ffaEAe8aaee6f2C810F048877fbe1FBB2B27606"

          # these variables apply to everything under 'gemini'
          vars:
            network_name: gemini
            geth_options: "--goerli"
            geth_dir: '/home/nucypher/geth/.ethereum/goerli/'
            geth_container_datadir: "/root/.ethereum/goerli"
            etherscan_domain: goerli.etherscan.io
            ansible_python_interpreter: /usr/bin/python3

            # these can be overridden at the instance level if desired
            NUCYPHER_KEYSTORE_PASSWORD: xxxxxxxxxxxxxxxxxxxxxxxpanda
            NUCYPHER_WORKER_ETH_PASSWORD: yyyyyyyyyyyyyyyyyyyystainpants
            # nucypher_ursula_run_options: "--debug"
            # nucypher_ursula_init_options: "--debug"
            # nucypher_image: "nucypher/nucypher:v2.0.0-beta.0"
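An inventory like this is passed with `-i` and narrowed with `--limit`. A usage sketch; the playbook filename here is illustrative, substitute whichever top-level playbook you are running:

```bash
# Everything under the "gemini" group:
ansible-playbook -i inventory.yml --limit gemini setup_remote_workers.yml
# A single host:
ansible-playbook -i inventory.yml --limit gemini1.mystakednodez.com setup_remote_workers.yml
```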
@ -1,81 +0,0 @@
- import_playbook: include/setup_user.yml
- import_playbook: include/setup_docker.yml
- import_playbook: include/stop_containers.yml

- name: "Restore from local backup of remote Ursula data"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"
  gather_facts: no

  tasks:
    - name: Remove Existing Data
      become: yes
      file:
        state: absent
        path: "{{item}}"
      with_items:
        - "{{geth_dir}}keystore"
        - /home/nucypher/nucypher/ursula.db
        - /home/nucypher/nucypher/keystore/
        - "{{geth_dir}}account.txt"
        - /home/nucypher/nucypher/ursula.json

    - name: Ensure directories exist
      become: yes
      file:
        state: directory
        path: "{{item}}"
      with_items:
        - "{{geth_dir}}keystore"
        - /home/nucypher/nucypher/ursula.db
        - /home/nucypher/nucypher/keystore

    - name: Restore Geth Keystore
      become: yes
      copy:
        src: "{{ item }}"
        dest: "{{geth_dir}}keystore/"
        owner: "nucypher"
        mode: 0600
      with_fileglob:
        - "{{restore_path}}{{geth_dir}}keystore/*"

    - name: Restore keystore
      become: yes
      copy:
        src: "{{ item }}"
        dest: /home/nucypher/nucypher/keystore
        owner: "nucypher"
        mode: 0600
      with_fileglob:
        - "{{restore_path}}/home/nucypher/nucypher/keystore/*"

    - name: Restore Ursula database files
      become: yes
      copy:
        src: "{{ item }}"
        dest: /home/nucypher/nucypher/ursula.db/
        owner: "nucypher"
        mode: 0600
      with_fileglob:
        - "{{restore_path}}/home/nucypher/nucypher/ursula.db/*"

    - name: Restore Ursula Config
      become: yes
      copy:
        src: "{{restore_path}}/home/nucypher/nucypher/ursula.json"
        dest: /home/nucypher/nucypher/
        owner: "nucypher"
        mode: 0600

    - name: Restore Checksum
      become: yes
      copy:
        src: "{{restore_path}}{{geth_dir}}account.txt"
        dest: "{{geth_dir}}account.txt"
        owner: "nucypher"
        mode: 0600

- import_playbook: include/update_existing_ursula.yml
- import_playbook: include/check_running_ursula.yml
- import_playbook: include/backup_ursula_data.yml
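The only extra input this play needs is `restore_path`, pointing at the local directory that holds a previous backup. A usage sketch with illustrative playbook and path names:

```bash
ansible-playbook -i inventory.yml restore_ursula_from_backup.yml \
  --extra-vars "restore_path=./remote_worker_backups/gemini1.mystakednodez.com"
```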
@ -1,14 +0,0 @@
- name: "Setup Remote Worker"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"

- import_playbook: include/setup_user.yml
- import_playbook: include/setup_docker.yml
- import_playbook: include/install_geth.yml
- import_playbook: include/init_worker.yml
- import_playbook: include/run_geth.yml
  when: node_is_decentralized is defined and node_is_decentralized
- import_playbook: include/init_ursula.yml
- import_playbook: include/run_ursula.yml
- import_playbook: include/check_running_ursula.yml
- import_playbook: include/backup_ursula_data.yml
@ -1,8 +0,0 @@
- name: "Setup Remote Geth"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"

- import_playbook: include/setup_user.yml
- import_playbook: include/setup_docker.yml
- import_playbook: include/install_geth.yml
- import_playbook: include/run_external_geth.yml
@ -1,5 +0,0 @@
- name: "Update Remote Workers to latest NuCypher, Geth"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"

- import_playbook: include/stop_containers.yml
@ -1,8 +0,0 @@
- name: "Update Remote Workers to latest NuCypher, Geth"
  hosts: "{{ play_hosts }}"
  remote_user: "{{default_user}}"

- import_playbook: include/stop_containers.yml
- import_playbook: include/update_existing_ursula.yml
- import_playbook: include/check_running_ursula.yml
- import_playbook: include/backup_ursula_data.yml
@ -1,13 +0,0 @@
FROM nucypher/rust-python:3.8.12

# Update
RUN apt-get update -y && apt-get upgrade -y
RUN apt-get install patch gcc libffi-dev wget git -y

WORKDIR /code
COPY . /code

# Porter requirements
RUN pip3 install .[porter]

CMD ["/bin/bash"]
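The compose files below build this image with the repository root as the build context; the equivalent manual build, run from the repo root, would be:

```bash
docker build -f deploy/docker/porter/Dockerfile -t porter:latest .
```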
@ -1,58 +0,0 @@
version: '3'

services:
  porter-http:
    restart: on-failure
    image: porter:latest
    container_name: porter-http
    build:
      context: ../../..
      dockerfile: deploy/docker/porter/Dockerfile
    ports:
      # Default Porter port
      - "80:9155"
    volumes:
      - .:/code
      - ~/.local/share/nucypher:/nucypher
    command: ["nucypher", "porter", "run",
              "--eth-provider", "${WEB3_PROVIDER_URI}",
              "--network", "${NUCYPHER_NETWORK}",
              "--allow-origins", "${PORTER_CORS_ALLOW_ORIGINS}"]  # empty string if the env var is not defined, which means CORS is not enabled by default

  porter-https:
    restart: on-failure
    image: porter:latest
    container_name: porter-https
    ports:
      # Default Porter port
      - "443:9155"
    volumes:
      - .:/code
      - ~/.local/share/nucypher:/nucypher
      - "${TLS_DIR}:/etc/porter/tls/"
    command: ["nucypher", "porter", "run",
              "--eth-provider", "${WEB3_PROVIDER_URI}",
              "--network", "${NUCYPHER_NETWORK}",
              "--tls-key-filepath", "/etc/porter/tls/key.pem",
              "--tls-certificate-filepath", "/etc/porter/tls/cert.pem",
              "--allow-origins", "${PORTER_CORS_ALLOW_ORIGINS}"]  # empty string if the env var is not defined, which means CORS is not enabled by default

  porter-https-auth:
    restart: on-failure
    image: porter:latest
    container_name: porter-https-auth
    ports:
      # Default Porter port
      - "443:9155"
    volumes:
      - .:/code
      - ~/.local/share/nucypher:/nucypher
      - "${TLS_DIR}:/etc/porter/tls/"
      - "${HTPASSWD_FILE}:/etc/porter/auth/htpasswd"
    command: ["nucypher", "porter", "run",
              "--eth-provider", "${WEB3_PROVIDER_URI}",
              "--network", "${NUCYPHER_NETWORK}",
              "--tls-key-filepath", "/etc/porter/tls/key.pem",
              "--tls-certificate-filepath", "/etc/porter/tls/cert.pem",
              "--basic-auth-filepath", "/etc/porter/auth/htpasswd",
              "--allow-origins", "${PORTER_CORS_ALLOW_ORIGINS}"]  # empty string if the env var is not defined, which means CORS is not enabled by default
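Each service is driven entirely by environment variables. A launch sketch for the plain-HTTP variant (the provider URI and network values are placeholders):

```bash
export WEB3_PROVIDER_URI="https://your.eth.provider:8545"
export NUCYPHER_NETWORK="mainnet"
export PORTER_CORS_ALLOW_ORIGINS=""   # empty: CORS stays disabled
docker-compose up -d porter-http
docker-compose logs -f porter-http
```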
@ -1,4 +0,0 @@
FROM nginxproxy/nginx-proxy:alpine

# Copy porter.local virtual host location configuration file
COPY ./deploy/docker/porter/nginx/porter.local_location /etc/nginx/vhost.d/
@ -1,37 +0,0 @@
version: '3'

services:

  nginx-proxy:
    restart: always
    image: nginxproxy/nginx-proxy:alpine
    build:
      context: ../../../..
      dockerfile: deploy/docker/porter/nginx/Dockerfile
    ports:
      - "443:443"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      # because of the vhost name used below, the cert and key should be named "porter.local.crt" and "porter.local.key" respectively
      - "${TLS_DIR}:/etc/nginx/certs/"

  nginx-porter:
    restart: on-failure
    image: porter:latest
    build:
      context: ../../../..
      dockerfile: deploy/docker/porter/Dockerfile
    expose:
      # Default Porter port
      - "9155"
    volumes:
      - .:/code
      - ~/.local/share/nucypher:/nucypher
    command: ["nucypher", "porter", "run",
              "--eth-provider", "${WEB3_PROVIDER_URI}",
              "--network", "${NUCYPHER_NETWORK}"]
    environment:
      - VIRTUAL_HOST=porter.local
      - VIRTUAL_PORT=9155
    depends_on:
      - nginx-proxy
@ -1,27 +0,0 @@
set $allow_origin "";

#
# Allow CORS for any domain by default - comment out if not desired
#
if ($http_origin ~* (.*)) {
    set $allow_origin "true";
}

#
# Allow CORS for a specific domain. For specifying conditions, see https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#if.
# Uncomment and edit if desired. There can be one or more of these 'if' directives for various origin checks.
#
#if ($http_origin ~* (.*\.yourdomain\.com$)) {
#    set $allow_origin "true";
#}

#
# For multiple top-level domains:
#
#if ($http_origin ~* (.*\.yourdomain\.(com|org)$)) {
#    set $allow_origin "true";
#}

if ($allow_origin = "true") {
    add_header 'Access-Control-Allow-Origin' '$http_origin';
}
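With the permissive default block active, whatever Origin the client sends is echoed back. A check against a deployed proxy (hostname and origin are placeholders; `-D -` dumps the response headers):

```bash
curl -sk -D - -o /dev/null \
  -H "Origin: https://app.example.com" \
  https://porter.local/
# look for: Access-Control-Allow-Origin: https://app.example.com
```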
@ -1,20 +0,0 @@
{
  "coinbase" : "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1",
  "difficulty" : "10000",
  "extraData" : "0x",
  "gasLimit" : "8000000",
  "nonce" : "0x0112358132134550",
  "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp" : "0x00",
  "alloc": {
    "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1": {"balance": "100000000000000000000000"}
  },
  "config": {
    "chainId": 112358,
    "homesteadBlock": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0
  }
}
@ -1,20 +0,0 @@
#!/usr/bin/env bash

# Create custom blockchain storage area
rm -rf ./chaindata
mkdir ./chaindata

# Create an Account
echo "$GETH_PASSWORD" > ./password.txt

geth account new \
     --datadir ./chaindata \
     --password ./password.txt

# Render the Genesis Template <-TODO

# Init the new blockchain
geth --datadir ./chaindata \
     --identity "NuCypherTestnet" \
     --networkid 112358 \
     init custom_genesis.json
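After `init`, any node that reuses the same datadir and network id joins the custom chain; a minimal sketch:

```bash
geth --datadir ./chaindata --networkid 112358 console
```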
@ -1,10 +0,0 @@
#!/usr/bin/env bash

# Install Geth
sudo apt-get install software-properties-common -y
sudo add-apt-repository -y ppa:ethereum/ethereum
sudo apt-get update -y
sudo apt-get install ethereum -y

# Verify Installation
geth --version
@ -1,10 +0,0 @@
#!/usr/bin/env bash

mkdir ./bootnode

# Create a bootnode
bootnode --genkey ./bootnode/bootnode.key --verbosity 6
bootnode --nodekey ./bootnode/bootnode.key --writeaddress

# Run Bootnode
bootnode --nodekey ./bootnode/bootnode.key --verbosity 6
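`--writeaddress` prints the bootnode's public identity; peers reference it as an enode URL combining that identity with the bootnode's reachable IP and UDP port (30301 is the bootnode default). A sketch with a placeholder IP:

```bash
BOOTNODE_ID=$(bootnode --nodekey ./bootnode/bootnode.key --writeaddress)
echo "enode://${BOOTNODE_ID}@203.0.113.10:30301"
```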
@ -1,13 +0,0 @@
#!/usr/bin/env bash

IP="127.0.0.1"

geth --datadir ./chaindata \
     --networkid 112358 \
     --rpc \
     --mine \
     --minerthreads 1 \
     --bootnodes enode://f613b39e61d78f3d8af6a9b4d3b4123330358af4f7ef471d5f45a77572c498cd55469420045453227fe818e118916eb553a39050c1369f201749e0e2fef8eb47@[::1]:30301
     # --nat "extip:$IP" \
     # --etherbase=0x0000000000000000000000000000000000000000
     # --gasprice 1
@ -1,6 +0,0 @@
{
  "Oumuamua": ["oregon-seednodes-414af81f89776e09.elb.us-west-2.amazonaws.com",
               "eu-federated-balancer-40be4480ec380cd7.elb.eu-central-1.amazonaws.com"],

  "devnet": ["https://18.222.119.242:9151"]
}
@ -1,13 +0,0 @@
[Unit]
Description="Ethereum Netstats Service"

[Service]
User=ubuntu
Type=simple
WorkingDirectory=/home/ubuntu/code
Environment=PORT={{ port }}
Environment=WS_SECRET={{ secret }}
ExecStart=/usr/bin/npm start

[Install]
WantedBy=multi-user.target
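This and the following unit files are Jinja templates (`{{ port }}`, `{{ secret }}`, ...); once rendered, installation follows the usual systemd routine. A sketch with an illustrative unit filename:

```bash
sudo cp eth_netstats.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now eth_netstats.service
journalctl -u eth_netstats.service -f
```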
@ -1,10 +0,0 @@
[Unit]
Description="Go Ethereum Discovery Bootnode Service"

[Service]
User=root
Type=simple
ExecStart=/usr/bin/bootnode --nodekey /home/ubuntu/bootnode.key --verbosity 6

[Install]
WantedBy=multi-user.target
@ -1,22 +0,0 @@
[Unit]
Description="Geth Miner Service"

[Service]
User=root
Type=simple
ExecStart=/usr/bin/geth --datadir {{ datadir }} \
    --networkid {{ networkid }} \
    --port 30303 \
    --syncmode {{ syncmode }} \
    --rpc \
    --rpcaddr "{{ rpchost }}" \
    --rpcapi "eth,net,web3,miner,debug,personal,rpc,admin" \
    --rpccorsdomain "*" \
    --mine \
    --minerthreads 1 \
    --ethstats {{ nickname }}:{{ eth_netstats_secret }}@{{ eth_netstats_ip }}:{{ eth_netstats_port }} \
    --bootnodes {{ bootnode_uri }} \
    --verbosity 8

[Install]
WantedBy=multi-user.target
@ -1,10 +0,0 @@
[Unit]
Description="Run 'Moe', A NuCypher Network Monitor."

[Service]
User=ubuntu
Type=simple
ExecStart={{ virtualenv_path }}/bin/nucypher moe --network {{ nucypher_network_domain }} --teacher {{ teacher_uri }}

[Install]
WantedBy=multi-user.target
@ -1,7 +0,0 @@
[
  "enode://bf150c793f378775e8cf09bee4fba37ea65363fe7a41171790a80ef6462de619cad2c05f42fc58655ad317503d5da8fee898e911fdf386ac6d15da12b5e883eb@3.92.166.78:30301",
  "enode://13da3c4b5b1ca32dfb0fcd662b9c69daf6b564e6f791ddae107d57049f25952aac329de336fd393f5b42b6aa2bbb263d7aa5c426b473be611739795aa18b0212@54.173.27.77:30303",
  "enode://4f7a27820107c235bb0f8086ee1c2bad62174450ec2eec12cb29e3fa7ecb9f332710373c1d11a3115aa72f2dabbae27b73eac51f06d3df558dd9fb51007da653@52.91.112.249:30303",
  "enode://6b58a9437aa88f254b75110019c54807cf1d7da9729f2c022a2463bae86b639288909fe00ffac0599e616676eea2de3c503bacaf4be835a02195bea0b349ca80@54.88.246.77:30303",
  "enode://562051180eca42514e44b4428ed20a3cb626654631f53bbfa549de7d3b7e418376e8f784c232429d7ff01bd0597e3ce7327699bb574d39ac3b2ac1729ed0dd44@54.224.110.32:30303"
]
@ -1,20 +0,0 @@
{
  "coinbase" : "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1",
  "difficulty" : "10000",
  "extraData" : "0x",
  "gasLimit" : "8000000",
  "nonce" : "0x0112358132134550",
  "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp" : "0x00",
  "alloc": {
    "0xA87722643685B38D37ecc7637ACA9C1E09c8C5e1": {"balance": "100000000000000000000000"}
  },
  "config": {
    "chainId": 112358,
    "homesteadBlock": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0
  }
}