Merge manifests and friends to the gh-pages branch (#4)

Co-authored-by: Anthony Lapenna <anthony.lapenna@portainer.io>
Co-authored-by: Anthony Lapenna <lapenna.anthony@gmail.com>
David Young 2020-08-28 10:13:06 +12:00 committed by GitHub
parent 904ecce9a3
commit dc33e42cbd
28 changed files with 1053 additions and 0 deletions

.ci/ct-config.yaml (new file)
@@ -0,0 +1,3 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
lint-conf: .ci/lint-config.yaml
chart-dirs: deploy/helm/charts

.ci/lint-config.yaml (new file)
@@ -0,0 +1,6 @@
rules:
  # One blank line is OK
  empty-lines:
    max-start: 1
    max-end: 1
    max: 1

(new file, name not shown: manifest-generation script)
@@ -0,0 +1,24 @@
#!/bin/bash
#
# What is this?
# -------------
# This handy little script will generate kubernetes YAML manifests from the portainer
# helm chart. It's intended to be used to prepare up-to-date manifests for users who prefer _not_
# to use helm.
#
# How does it work?
# -----------------
# At a high level, we run helm in --dry-run mode, which causes the manifests to be rendered, but displayed
# to stdout instead of applied to Kubernetes.
# Then we perform certain transformations on these rendered manifests:
# 1. Remove the rendered NOTES
# 2. Remove the header produced by helm --dry-run
# 3. Remove references to helm in rendered manifests (no point attaching a label like "app.kubernetes.io/managed-by: Helm" if we are not!)
helm install --no-hooks --namespace zorgburger --set disableTest=true --dry-run zorgburger deploy/helm/charts/portainer \
| sed -n '1,/NOTES/p' | sed \$d \
| grep -vE 'NAME|LAST DEPLOYED|NAMESPACE|STATUS|REVISION|HOOKS|MANIFEST|TEST SUITE' \
| grep -iv helm \
| sed 's/zorgburger/portainer/' \
| sed 's/portainer-portainer/portainer/' \
> deploy/manifests/portainer/portainer.yaml

.ci/scripts/local-ct-lint.sh (new executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
docker run --rm -it -w /repo -v `pwd`:/repo quay.io/helmpack/chart-testing ct lint --all --config=.ci/ct-config.yaml

(new file, name not shown: local kube-score helper script)
@@ -0,0 +1,3 @@
#!/bin/bash
helm template charts/portainer -f .ci/values-kube-score.yaml --no-hooks | kube-score score -

.ci/values-kube-score.yaml (new file)
@@ -0,0 +1,12 @@
# This file sets some opinionated values for kube-score to use
# when parsing the chart
image:
  pullPolicy: Always
resources:
  requests:
    cpu: 1
    memory: 100Mi
  limits:
    cpu: 1
    memory: 100Mi

.github/workflows/bump-ci.md (new empty file)

(new file, name not shown: chart lint/test workflow under .github/workflows)
@@ -0,0 +1,34 @@
name: Lint and Test Charts

on:
  push:
    paths:
      - 'deploy/helm/charts/**'
      - '.github/**'

jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1

      - name: Run chart-testing (lint)
        id: lint
        uses: helm/chart-testing-action@v1.0.0
        with:
          config: .ci/ct-config.yaml
          command: lint

      - name: Create kind cluster
        uses: helm/kind-action@v1.0.0
        with:
          install_local_path_provisioner: true
        # Only build a kind cluster if there are chart changes to test.
        if: steps.lint.outputs.changed == 'true'

      - name: Run chart-testing (install)
        uses: helm/chart-testing-action@v1.0.0
        with:
          command: install
          config: .ci/ct-config.yaml

(new file, name not shown: chart publish workflow under .github/workflows)
@@ -0,0 +1,32 @@
name: Publish helm chart

on:
  push:
    branches:
      - master
    paths:
      - 'deploy/helm/charts/**'
      - '.github/**'

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Fetch history
        run: git fetch --prune --unshallow

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.0.0
        with:
          charts_dir: deploy/helm/charts/
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

.helmignore (new file)
@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

Chart.yaml (new file)
@@ -0,0 +1,31 @@
apiVersion: v2
name: portainer
description: Helm chart used to deploy Portainer for Kubernetes
home: https://www.portainer.io
icon: https://github.com/portainer/portainer/raw/develop/app/assets/ico/apple-touch-icon.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 1.0.0-pre1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 2.0.0
sources:
  - https://github.com/portainer/k8s

maintainers:
  - name: funkypenguin
    email: davidy@funkypenguin.co.nz
    url: https://www.funkypenguin.co.nz

README.md (new file)
@@ -0,0 +1,78 @@
# Deploy Portainer using Helm Chart
Before proceeding, create the namespace you intend to deploy Portainer into.
For instance:
```bash
kubectl create namespace portainer
```
# Testing the Chart
Execute the following to test the chart:
```bash
helm install --dry-run --debug portainer -n portainer deploy/helm/portainer
```
# Installing the Chart
Execute the following to install the chart:
```bash
helm upgrade -i -n portainer portainer deploy/helm/portainer
## Refer to the NOTES output for how to access the Portainer web UI
## An example is shown below
NOTES:
1. Get the application URL by running these commands:
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace portainer svc -w portainer'
export SERVICE_IP=$(kubectl get svc --namespace portainer portainer --template "{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}")
echo http://$SERVICE_IP:9000
http://20.40.176.8:9000
```
# Deleting the Chart
Execute the following to delete the chart:
```bash
## Delete the Helm Chart
helm delete -n portainer portainer
## Delete the Namespace
kubectl delete namespace portainer
```
# Chart Configuration
The following table lists the configurable parameters of the Portainer chart and their default values. The values file can be found under `deploy/helm/portainer/values.yaml`.
*The parameter list is updated as the chart evolves.*
| Parameter | Description | Default |
| - | - | - |
| `replicaCount` | Number of Portainer service replicas (ALWAYS set to 1) | `1` |
| `image.repository` | Portainer Docker Hub repository | `portainer/portainer-k8s-beta` |
| `image.tag` | Tag for the Portainer image; `linux-amd64` for Linux and `linux-arm` for ARM | `linux-amd64` |
| `image.pullPolicy` | Portainer image pulling policy | `IfNotPresent` |
| `imagePullSecrets` | Image pull secrets, required if the Portainer image is hosted in a private registry | `nil` |
| `serviceAccount.annotations` | Annotations to add to the service account | `null` |
| `serviceAccount.name` | The name of the service account to use | `portainer-sa-clusteradmin` |
| `service.type` | Service Type for the main Portainer Service; ClusterIP, NodePort and LoadBalancer | `LoadBalancer` |
| `service.httpPort` | HTTP port for accessing Portainer Web | `9000` |
| `service.httpNodePort` | Static NodePort for accessing Portainer Web. Specify only if the type is NodePort | `nil` |
| `service.edgePort` | TCP port for accessing Portainer Edge | `8000` |
| `service.edgeNodePort` | Static NodePort for accessing Portainer Edge. Specify only if the type is NodePort | `nil` |
| `ingress.enabled` | Create an ingress for Portainer | `false` |
| `ingress.annotations` | Annotations to add to the ingress. For instance, `kubernetes.io/ingress.class: nginx` | `{}` |
| `ingress.hosts.host` | URL for Portainer Web. For instance, `portainer.example.io` | `nil` |
| `ingress.hosts.paths.path` | Path for the Portainer Web. | `/` |
| `ingress.hosts.paths.port` | Port for the Portainer Web. | `9000` |
| `ingress.tls` | TLS support on ingress. Must create a secret with TLS certificates in advance | `[]` |
| `resources` | Portainer resource requests and limits | `{}` |
| `persistence.enabled` | Whether to enable data persistence | `true` |
| `persistence.existingClaim` | Name of an existing PVC to use for data persistence | `nil` |
| `persistence.size` | Size of the PVC used for persistence | `1Gi` |
| `persistence.annotations` | Annotations to apply to PVC used for persistence | `{}` |
| `persistence.storageClass` | StorageClass to apply to PVC used for persistence | `default` |
| `persistence.accessMode` | AccessMode for persistence | `ReadWriteOnce` |
| `persistence.selector` | Selector for persistence | `nil` |
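
For example, to expose Portainer on a fixed NodePort instead of the default LoadBalancer, override the relevant parameters at install time (a sketch built from the table above; the node port and size values are only illustrations):
```bash
helm upgrade -i -n portainer portainer deploy/helm/portainer \
  --set service.type=NodePort \
  --set service.httpNodePort=30777 \
  --set persistence.size=2Gi
```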

templates/NOTES.txt (new file)
@@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ if .port }}:{{ .port }}{{ else }}:9000{{ end }}{{.path}}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "portainer.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "portainer.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "portainer.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.httpPort }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "portainer.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:9000 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9000:9000
{{- end }}

templates/_helpers.tpl (new file)
@@ -0,0 +1,74 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "portainer.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "portainer.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "portainer.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "portainer.labels" -}}
helm.sh/chart: {{ include "portainer.chart" . }}
{{ include "portainer.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "portainer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "portainer.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "portainer.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "portainer.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Provide a pre-defined claim or a claim based on the Release
*/}}
{{- define "portainer.pvcName" -}}
{{- if .Values.persistence.existingClaim }}
{{- .Values.persistence.existingClaim }}
{{- else -}}
{{- template "portainer.fullname" . }}
{{- end -}}
{{- end -}}

templates/deployment.yaml (new file)
@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "portainer.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "portainer.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "portainer.serviceAccountName" . }}
      volumes:
        - name: "data"
          persistentVolumeClaim:
            claimName: {{ template "portainer.pvcName" . }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          volumeMounts:
            - name: data
              mountPath: /data
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
            - name: tcp-edge
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: 9000
          readinessProbe:
            httpGet:
              path: /
              port: 9000
          resources:
            {{- toYaml .Values.resources | nindent 12 }}

templates/ingress.yaml (new file)
@@ -0,0 +1,41 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "portainer.fullname" . -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path | default "/" }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ .port | default 9000 }}
          {{- end }}
    {{- end }}
{{- end }}

templates/pvc.yaml (new file)
@@ -0,0 +1,30 @@
{{- if not .Values.persistence.existingClaim -}}
---
kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
  name: {{ template "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  annotations:
    {{- if .Values.persistence.storageClass }}
    volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }}
    {{- else }}
    volume.alpha.kubernetes.io/storage-class: "generic"
    {{- end }}
    {{- if .Values.persistence.annotations }}
{{ toYaml .Values.persistence.annotations | indent 2 }}
    {{ end }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ default "ReadWriteOnce" .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
  {{- if .Values.persistence.selector }}
  selector:
{{ toYaml .Values.persistence.selector | indent 4 }}
  {{ end }}
{{- end }}

templates/rbac.yaml (new file)
@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "portainer.fullname" . }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    namespace: {{ .Release.Namespace }}
    name: {{ include "portainer.serviceAccountName" . }}

templates/service.yaml (new file)
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.httpPort }}
      targetPort: 9000
      protocol: TCP
      name: http
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.httpNodePort))) }}
      nodePort: {{ .Values.service.httpNodePort }}
      {{- end }}
    - port: {{ .Values.service.edgePort }}
      targetPort: 8000
      protocol: TCP
      name: edge
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }}
      nodePort: {{ .Values.service.edgeNodePort }}
      {{- end }}
  selector:
    {{- include "portainer.selectorLabels" . | nindent 4 }}

templates/serviceaccount.yaml (new file)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "portainer.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}

templates/tests/test-connection.yaml (new file)
@@ -0,0 +1,18 @@
{{- if not .Values.disableTest -}}
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "portainer.fullname" . }}-test-connection"
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "portainer.fullname" . }}:{{ .Values.service.httpPort }}']
  restartPolicy: Never
{{ end }}

values.yaml (new file)
@@ -0,0 +1,40 @@
# Default values for portainer.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: portainer/portainer-ce
  tag: latest
  pullPolicy: IfNotPresent

imagePullSecrets: []

serviceAccount:
  annotations: {}
  name: portainer-sa-clusteradmin

service:
  # Set the httpNodePort and edgeNodePort only if the type is NodePort
  # For Ingress, set the type to be ClusterIP and set ingress.enabled to true
  # For Cloud Providers, set the type to be LoadBalancer
  type: ClusterIP
  httpPort: 9000
  httpNodePort:
  edgePort: 8000
  edgeNodePort:

ingress:
  enabled: false
  annotations: {}
  hosts:
    - host:
      paths: []
  tls: []

resources: {}

persistence:
  size: "1Gi"
  annotations: {}
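
As a sketch of how these values combine, a hypothetical override file (`my-values.yaml`, a name chosen here for illustration) that switches to Ingress-based access per the service comments above; the hostname and annotation are placeholders:
```yaml
service:
  type: ClusterIP
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
  hosts:
    - host: portainer.example.io
      paths:
        - path: /
          port: 9000
```
It would be applied with `helm upgrade -i -n portainer portainer deploy/helm/portainer -f my-values.yaml`.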

README.md (new file: agent manifests)
@@ -0,0 +1,17 @@
# Agent
These manifests deploy the Portainer agent inside a Kubernetes cluster.
To deploy an Edge agent inside your Kubernetes cluster, we recommend following the instructions available inside your Portainer instance.
# Usage
## Deploy the Portainer agent and access it via an external load balancer
If your cloud provider supports external load balancers, you can use the following command to deploy the regular Portainer agent (not Edge):
```
kubectl ... apply -f portainer-agent-k8s-lb.yaml
```
This will deploy the Portainer agent and create an external load balancer which you'll be able to use to connect to the agent on port 9001.
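Once the service is up, you can look up the external address allocated to the `portainer-agent` service (the service name comes from the manifest; this is standard `kubectl` usage, shown here as a sketch):
```
kubectl get svc --namespace portainer portainer-agent
```
The agent is then reachable at `<EXTERNAL-IP>:9001`.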

portainer-agent-edge-k8s.yaml (new file)
@@ -0,0 +1,95 @@
apiVersion: v1
kind: Namespace
metadata:
  name: portainer
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: portainer-sa-clusteradmin
  namespace: portainer
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: portainer-crb-clusteradmin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: portainer-sa-clusteradmin
    namespace: portainer
# Optional: can be added to expose the agent port 80 to associate an Edge key.
# ---
# apiVersion: v1
# kind: Service
# metadata:
#   name: portainer-agent
#   namespace: portainer
# spec:
#   type: LoadBalancer
#   selector:
#     app: portainer-agent
#   ports:
#     - name: http
#       protocol: TCP
#       port: 80
#       targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  clusterIP: None
  selector:
    app: portainer-agent
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  selector:
    matchLabels:
      app: portainer-agent
  template:
    metadata:
      labels:
        app: portainer-agent
    spec:
      serviceAccountName: portainer-sa-clusteradmin
      containers:
        - name: portainer-agent
          image: portainer/agent:latest
          imagePullPolicy: Always
          env:
            - name: LOG_LEVEL
              value: DEBUG
            - name: KUBERNETES_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: EDGE
              value: "1"
            - name: AGENT_CLUSTER_ADDR
              value: "portainer-agent"
            - name: EDGE_ID
              valueFrom:
                configMapKeyRef:
                  name: portainer-agent-edge-id
                  key: edge.id
            - name: EDGE_KEY
              valueFrom:
                secretKeyRef:
                  name: portainer-agent-edge-key
                  key: edge.key
          ports:
            - containerPort: 9001
              protocol: TCP
            - containerPort: 80
              protocol: TCP

portainer-agent-k8s-lb.yaml (new file)
@@ -0,0 +1,80 @@
apiVersion: v1
kind: Namespace
metadata:
  name: portainer
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: portainer-sa-clusteradmin
  namespace: portainer
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: portainer-crb-clusteradmin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: portainer-sa-clusteradmin
    namespace: portainer
---
apiVersion: v1
kind: Service
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  type: LoadBalancer
  selector:
    app: portainer-agent
  ports:
    - name: http
      protocol: TCP
      port: 9001
      targetPort: 9001
---
apiVersion: v1
kind: Service
metadata:
  name: portainer-agent-headless
  namespace: portainer
spec:
  clusterIP: None
  selector:
    app: portainer-agent
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  selector:
    matchLabels:
      app: portainer-agent
  template:
    metadata:
      labels:
        app: portainer-agent
    spec:
      serviceAccountName: portainer-sa-clusteradmin
      containers:
        - name: portainer-agent
          image: portainer/agent:latest
          imagePullPolicy: Always
          env:
            - name: LOG_LEVEL
              value: DEBUG
            - name: AGENT_CLUSTER_ADDR
              value: "portainer-agent-headless"
            - name: KUBERNETES_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          ports:
            - containerPort: 9001
              protocol: TCP

(new file, name not shown: NodePort variant of the agent manifest)
@@ -0,0 +1,81 @@
apiVersion: v1
kind: Namespace
metadata:
  name: portainer
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: portainer-sa-clusteradmin
  namespace: portainer
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: portainer-crb-clusteradmin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: portainer-sa-clusteradmin
    namespace: portainer
---
apiVersion: v1
kind: Service
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  type: NodePort
  selector:
    app: portainer-agent
  ports:
    - name: http
      protocol: TCP
      port: 9001
      targetPort: 9001
      nodePort: 30778
---
apiVersion: v1
kind: Service
metadata:
  name: portainer-agent-headless
  namespace: portainer
spec:
  clusterIP: None
  selector:
    app: portainer-agent
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portainer-agent
  namespace: portainer
spec:
  selector:
    matchLabels:
      app: portainer-agent
  template:
    metadata:
      labels:
        app: portainer-agent
    spec:
      serviceAccountName: portainer-sa-clusteradmin
      containers:
        - name: portainer-agent
          image: portainer/agent:latest
          imagePullPolicy: Always
          env:
            - name: LOG_LEVEL
              value: DEBUG
            - name: AGENT_CLUSTER_ADDR
              value: "portainer-agent-headless"
            - name: KUBERNETES_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          ports:
            - containerPort: 9001
              protocol: TCP

(new file, name not shown: Edge agent deployment script)
@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Script used to deploy the Portainer Edge agent inside a Kubernetes cluster.
# Requires:
# curl
# kubectl
### COLOR OUTPUT ###
ESeq="\x1b["
RCol="$ESeq"'0m' # Text Reset
# Regular Bold Underline High Intensity BoldHigh Intens Background High Intensity Backgrounds
Bla="$ESeq"'0;30m'; BBla="$ESeq"'1;30m'; UBla="$ESeq"'4;30m'; IBla="$ESeq"'0;90m'; BIBla="$ESeq"'1;90m'; On_Bla="$ESeq"'40m'; On_IBla="$ESeq"'0;100m';
Red="$ESeq"'0;31m'; BRed="$ESeq"'1;31m'; URed="$ESeq"'4;31m'; IRed="$ESeq"'0;91m'; BIRed="$ESeq"'1;91m'; On_Red="$ESeq"'41m'; On_IRed="$ESeq"'0;101m';
Gre="$ESeq"'0;32m'; BGre="$ESeq"'1;32m'; UGre="$ESeq"'4;32m'; IGre="$ESeq"'0;92m'; BIGre="$ESeq"'1;92m'; On_Gre="$ESeq"'42m'; On_IGre="$ESeq"'0;102m';
Yel="$ESeq"'0;33m'; BYel="$ESeq"'1;33m'; UYel="$ESeq"'4;33m'; IYel="$ESeq"'0;93m'; BIYel="$ESeq"'1;93m'; On_Yel="$ESeq"'43m'; On_IYel="$ESeq"'0;103m';
Blu="$ESeq"'0;34m'; BBlu="$ESeq"'1;34m'; UBlu="$ESeq"'4;34m'; IBlu="$ESeq"'0;94m'; BIBlu="$ESeq"'1;94m'; On_Blu="$ESeq"'44m'; On_IBlu="$ESeq"'0;104m';
Pur="$ESeq"'0;35m'; BPur="$ESeq"'1;35m'; UPur="$ESeq"'4;35m'; IPur="$ESeq"'0;95m'; BIPur="$ESeq"'1;95m'; On_Pur="$ESeq"'45m'; On_IPur="$ESeq"'0;105m';
Cya="$ESeq"'0;36m'; BCya="$ESeq"'1;36m'; UCya="$ESeq"'4;36m'; ICya="$ESeq"'0;96m'; BICya="$ESeq"'1;96m'; On_Cya="$ESeq"'46m'; On_ICya="$ESeq"'0;106m';
Whi="$ESeq"'0;37m'; BWhi="$ESeq"'1;37m'; UWhi="$ESeq"'4;37m'; IWhi="$ESeq"'0;97m'; BIWhi="$ESeq"'1;97m'; On_Whi="$ESeq"'47m'; On_IWhi="$ESeq"'0;107m';
printSection() {
  echo -e "${BIYel}>>>> ${BIWhi}${1}${RCol}"
}

info() {
  echo -e "${BIWhi}${1}${RCol}"
}

success() {
  echo -e "${BIGre}${1}${RCol}"
}

error() {
  echo -e "${BIRed}${1}${RCol}"
}

errorAndExit() {
  echo -e "${BIRed}${1}${RCol}"
  exit 1
}

### !COLOR OUTPUT ###

main() {
  if [[ $# -ne 2 ]]; then
    error "Not enough arguments"
    error "Usage: ${0} <EDGE_ID> <EDGE_KEY>"
    exit 1
  fi

  [[ "$(command -v curl)" ]] || errorAndExit "Unable to find curl binary. Please ensure curl is installed before running this script."
  [[ "$(command -v kubectl)" ]] || errorAndExit "Unable to find kubectl binary. Please ensure kubectl is installed before running this script."

  info "Downloading agent manifest..."
  curl -L https://portainer.github.io/k8s/deploy/manifests/agent/portainer-agent-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest"

  info "Creating Portainer namespace..."
  kubectl create namespace portainer

  info "Creating agent configuration..."
  kubectl create configmap portainer-agent-edge-id "--from-literal=edge.id=$1" -n portainer

  info "Creating agent secret..."
  kubectl create secret generic portainer-agent-edge-key "--from-literal=edge.key=$2" -n portainer

  info "Deploying agent..."
  kubectl apply -f portainer-agent-edge-k8s.yaml || errorAndExit "Unable to deploy agent manifest"

  success "Portainer Edge agent successfully deployed"
  exit 0
}

main "$@"

deploy/manifests/portainer/portainer.yaml (new file)
@@ -0,0 +1,129 @@
---
# Source: portainer/templates/pvc.yaml
kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
  name: portainer
  namespace: portainer
  annotations:
    volume.alpha.kubernetes.io/storage-class: "generic"
  labels:
    io.portainer.kubernetes.application.stack: portainer
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/version: "2.0.0"
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "1Gi"
---
# Source: portainer/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: portainer-sa-clusteradmin
  namespace: portainer
  labels:
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/version: "2.0.0"
---
# Source: portainer/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: portainer
  labels:
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/version: "2.0.0"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    namespace: portainer
    name: portainer-sa-clusteradmin
---
# Source: portainer/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: portainer
  namespace: portainer
  labels:
    io.portainer.kubernetes.application.stack: portainer
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/version: "2.0.0"
spec:
  type: ClusterIP
  ports:
    - port: 9000
      targetPort: 9000
      protocol: TCP
      name: http
    - port: 8000
      targetPort: 8000
      protocol: TCP
      name: edge
  selector:
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
---
# Source: portainer/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: portainer
  namespace: portainer
  labels:
    io.portainer.kubernetes.application.stack: portainer
    app.kubernetes.io/name: portainer
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/version: "2.0.0"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: portainer
      app.kubernetes.io/instance: portainer
  template:
    metadata:
      labels:
        app.kubernetes.io/name: portainer
        app.kubernetes.io/instance: portainer
    spec:
      serviceAccountName: portainer-sa-clusteradmin
      volumes:
        - name: "data"
          persistentVolumeClaim:
            claimName: portainer
      containers:
        - name: portainer
          image: "portainer/portainer-ce:latest"
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: data
              mountPath: /data
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
            - name: tcp-edge
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: 9000
          readinessProbe:
            httpGet:
              path: /
              port: 9000
          resources:
            {}