# argo-helm/charts/argo-workflows/values.yaml

images:
# -- Common tag for Argo Workflows images. Defaults to `.Chart.AppVersion`.
tag: ""
# -- imagePullPolicy to apply to all containers
pullPolicy: Always
# -- Secrets with credentials to pull images from a private registry
pullSecrets: []
# - name: argo-pull-secret
## Custom resource configuration
crds:
# -- Install and upgrade CRDs
install: true
# -- Keep CRDs on chart uninstall
keep: true
# -- Annotations to be added to all CRDs
annotations: {}
# -- Create clusterroles that extend existing clusterroles to interact with argo-workflows CRDs
## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
createAggregateRoles: true
# -- String to partially override "argo-workflows.fullname" template
nameOverride:
# -- String to fully override "argo-workflows.fullname" template
fullnameOverride:
# -- Override the namespace
# @default -- `.Release.Namespace`
namespaceOverride: ""
# -- Labels to set on all resources
commonLabels: {}
# -- Override the Kubernetes version, which is used to evaluate certain manifests
kubeVersionOverride: ""
# Override APIVersions
apiVersionOverrides:
# -- String to override apiVersion of autoscaling rendered by this helm chart
autoscaling: "" # autoscaling/v2
# -- String to override apiVersion of GKE resources rendered by this helm chart
cloudgoogle: "" # cloud.google.com/v1
# -- String to override apiVersion of monitoring CRDs (ServiceMonitor) rendered by this helm chart
monitoring: "" # monitoring.coreos.com/v1
# -- Restrict Argo to operate only in a single namespace (the namespace of the
# Helm release) by applying Roles and RoleBindings instead of the Cluster
# equivalents, and start workflow-controller with the --namespaced flag. Use it
# in clusters with a strict access policy.
singleNamespace: false
workflow:
# -- Deprecated; use controller.workflowNamespaces instead.
namespace:
serviceAccount:
# -- Specifies whether a service account should be created
create: false
# -- Labels applied to created service account
labels: {}
# -- Annotations applied to created service account
annotations: {}
# -- Service account which is used to run workflows
name: "argo-workflow"
# -- Secrets with credentials to pull images from a private registry. Same format as `.Values.images.pullSecrets`
pullSecrets: []
rbac:
# -- Adds Role and RoleBinding for the above specified service account to be able to run workflows.
# A Role and RoleBinding pair is also created for each namespace in controller.workflowNamespaces (see below)
create: true
# -- Extra service accounts to be added to the RoleBinding
serviceAccounts: []
# - name: my-service-account
# namespace: my-namespace
controller:
image:
# -- Registry to use for the controller
registry: quay.io
# -- Repository to use for the controller
repository: argoproj/workflow-controller
# -- Image tag for the workflow controller. Defaults to `.Values.images.tag`.
tag: ""
# -- parallelism dictates how many workflows can be running at the same time
parallelism:
# -- Globally limits the rate at which pods are created.
# This is intended to mitigate flooding of the Kubernetes API server by workflows with a large amount of
# parallel nodes.
resourceRateLimit: {}
# limit: 10
# burst: 1
rbac:
# -- Adds Role and RoleBinding for the controller.
create: true
# -- Allows controller to get, list, and watch certain k8s secrets
secretWhitelist: []
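## Illustrative example (hypothetical secret names -- list only the secrets your workflows actually reference):
# - my-workflow-secret
# - another-workflow-secret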
# -- Allows controller to get, list and watch all k8s secrets. Can only be used if secretWhitelist is empty.
accessAllSecrets: false
# -- Allows controller to create and update ConfigMaps. Enables the memoization feature
writeConfigMaps: false
configMap:
# -- Create a ConfigMap for the controller
create: true
# -- ConfigMap name
name: ""
# -- ConfigMap annotations
annotations: {}
# -- Limits the maximum number of incomplete workflows in a namespace
namespaceParallelism:
# -- Resolves ongoing, uncommon AWS EKS bug: https://github.com/argoproj/argo-workflows/pull/4224
initialDelay:
# -- deploymentAnnotations is an optional map of annotations to be applied to the controller Deployment
deploymentAnnotations: {}
# -- podAnnotations is an optional map of annotations to be applied to the controller Pods
podAnnotations: {}
# -- Optional labels to add to the controller pods
podLabels: {}
# -- SecurityContext to set on the controller pods
podSecurityContext: {}
# podPortName: http
metricsConfig:
# -- Enables prometheus metrics server
enabled: false
# -- Path is the path where metrics are emitted. Must start with a "/".
path: /metrics
# -- Frequency at which prometheus scrapes metrics
interval: 30s
# -- Port is the port where metrics are emitted
port: 9090
# -- How often custom metrics are cleared from memory
metricsTTL: ""
# -- Flag that instructs prometheus to ignore metric emission errors.
ignoreErrors: false
# -- Flag to use a self-signed cert for TLS
secure: false
# -- Container metrics port name
portName: metrics
# -- Service metrics port
servicePort: 8080
# -- Service metrics port name
servicePortName: metrics
# -- Flag to enable headless service
headlessService: false
# -- When true, honorLabels preserves the metrics' labels when they collide with the target's labels.
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#honorlabels
honorLabels: false
# -- ServiceMonitor relabel configs to apply to samples before scraping
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
relabelings: []
# -- ServiceMonitor metric relabel configs to apply to samples before ingestion
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
metricRelabelings: []
# -- ServiceMonitor will add labels from the service to the Prometheus metric
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec
targetLabels: []
# -- the controller container's securityContext
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# -- enable Workflow Archive to store the status of workflows. Postgres and MySQL (>= 5.7.8) are available.
## Ref: https://argo-workflows.readthedocs.io/en/stable/workflow-archive/
persistence: {}
# connectionPool:
# maxIdleConns: 100
# maxOpenConns: 0
# # save the entire workflow into etcd and DB
# nodeStatusOffLoad: false
# # enable archiving of old workflows
# archive: false
# postgresql:
# host: localhost
# port: 5432
# database: postgres
# tableName: argo_workflows
# # the database secrets must be in the same namespace of the controller
# userNameSecret:
# name: argo-postgres-config
# key: username
# passwordSecret:
# name: argo-postgres-config
# key: password
# ssl: true
# # sslMode must be one of: disable, require, verify-ca, verify-full
# # you can find more information about those ssl options here: https://godoc.org/github.com/lib/pq
# sslMode: require
# mysql:
# host: localhost
# port: 3306
# database: argo
# tableName: argo_workflows
# userNameSecret:
# name: argo-mysql-config
# key: username
# passwordSecret:
# name: argo-mysql-config
# key: password
# -- Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level.
# Only valid for 2.7+
## See more: https://argo-workflows.readthedocs.io/en/stable/default-workflow-specs/
workflowDefaults: {}
# spec:
# ttlStrategy:
# secondsAfterCompletion: 86400
# # Ref: https://argo-workflows.readthedocs.io/en/stable/artifact-repository-ref/
# artifactRepositoryRef:
# configMap: my-artifact-repository # default is "artifact-repositories"
# key: v2-s3-artifact-repository # default can be set by the `workflows.argoproj.io/default-artifact-repository` annotation in config map.
# -- Number of workflow workers
workflowWorkers: # 32
# -- Number of workflow TTL workers
workflowTTLWorkers: # 4
# -- Number of pod cleanup workers
podCleanupWorkers: # 4
# -- Number of cron workflow workers
# Only valid for 3.5+
cronWorkflowWorkers: # 8
# -- Restricts the Workflows that the controller will process.
# Only valid for 2.9+
workflowRestrictions: {}
# templateReferencing: Strict|Secure
# telemetryConfig controls the path and port for prometheus telemetry. Telemetry is enabled and emitted in the same endpoint
# as metrics by default, but can be overridden using this config.
telemetryConfig:
# -- Enables prometheus telemetry server
enabled: false
# -- telemetry path
path: /telemetry
# -- Frequency at which prometheus scrapes telemetry data
interval: 30s
# -- telemetry container port
port: 8081
# -- How often custom metrics are cleared from memory
metricsTTL: ""
# -- Flag that instructs prometheus to ignore metric emission errors.
ignoreErrors: false
# -- Flag to use a self-signed cert for TLS
secure: false
# -- telemetry service port
servicePort: 8081
# -- telemetry service port name
servicePortName: telemetry
serviceMonitor:
# -- Enable a prometheus ServiceMonitor
enabled: false
# -- Prometheus ServiceMonitor labels
additionalLabels: {}
# -- Prometheus ServiceMonitor namespace
namespace: "" # "monitoring"
serviceAccount:
# -- Create a service account for the controller
create: true
# -- Service account name
name: ""
# -- Labels applied to created service account
labels: {}
# -- Annotations applied to created service account
annotations: {}
# -- Workflow controller name string
name: workflow-controller
# -- Specify all namespaces where this workflow controller instance will manage
# workflows. This controls where the service account and RBAC resources will
# be created. Only valid when singleNamespace is false.
workflowNamespaces:
- default
instanceID:
# -- Configures the controller to filter workflow submissions
# to only those which have a matching instanceID attribute.
## NOTE: If `instanceID.enabled` is set to `true` then either `instanceID.useReleaseName`
## or `instanceID.explicitID` must be defined.
enabled: false
# -- Use ReleaseName as instanceID
useReleaseName: false
# useReleaseName: true
# -- Use a custom instanceID
explicitID: ""
# explicitID: unique-argo-controller-identifier
logging:
# -- Set the logging level (one of: `debug`, `info`, `warn`, `error`)
level: info
# -- Set the glog logging level
globallevel: "0"
# -- Set the logging format (one of: `text`, `json`)
format: "text"
# -- Service type of the controller Service
serviceType: ClusterIP
# -- Annotations to be applied to the controller Service
serviceAnnotations: {}
# -- Optional labels to add to the controller Service
serviceLabels: {}
# -- The class of the load balancer implementation
loadBalancerClass: ""
# -- Source ranges to allow access to service from. Only applies to service type `LoadBalancer`
loadBalancerSourceRanges: []
# -- Resource limits and requests for the controller
resources: {}
# -- Configure liveness [probe] for the controller
# @default -- See [values.yaml]
livenessProbe:
httpGet:
port: 6060
path: /healthz
failureThreshold: 3
initialDelaySeconds: 90
periodSeconds: 60
timeoutSeconds: 30
# -- Extra environment variables to provide to the controller container
extraEnv: []
# - name: FOO
# value: "bar"
# -- Extra arguments to be added to the controller
extraArgs: []
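## Illustrative example (arguments are passed verbatim to the workflow-controller binary;
## verify flag names against `workflow-controller --help` for your controller version):
# - --qps=30
# - --burst=60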
# -- Additional volume mounts to the controller main container
volumeMounts: []
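## Illustrative example (hypothetical mount; pair it with a matching entry under `volumes` below):
# - name: scratch
#   mountPath: /scratch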
# -- Additional volumes to the controller pod
volumes: []
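## Illustrative example (hypothetical volume backing the mount sketched above):
# - name: scratch
#   emptyDir: {}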
# -- The number of controller pods to run
replicas: 1
# -- The number of revisions to keep.
revisionHistoryLimit: 10
pdb:
# -- Configure [Pod Disruption Budget] for the controller pods
enabled: false
# minAvailable: 1
# maxUnavailable: 1
# -- [Node selector]
nodeSelector:
kubernetes.io/os: linux
# -- [Tolerations] for use with node taints
tolerations: []
# -- Assign custom [affinity] rules
affinity: {}
# -- Assign custom [TopologySpreadConstraints] rules to the workflow controller
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# -- Leverage a PriorityClass to ensure your pods survive resource shortages.
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
# -- Configure Argo Server to show custom [links]
## Ref: https://argo-workflows.readthedocs.io/en/stable/links/
links: []
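## Illustrative example (placeholder name and URL; substitution variables per the links documentation above):
# - name: Example Workflow Logs
#   scope: workflow
#   url: https://logging.example.com/?namespace=${metadata.namespace}&workflowName=${metadata.name}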
# -- Configure Argo Server to show custom [columns]
## Ref: https://github.com/argoproj/argo-workflows/pull/10693
columns: []
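## Illustrative example (shows the name/type/key shape described in the linked PR):
# - name: Workflow Completed
#   type: label
#   key: workflows.argoproj.io/completed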
# -- Set ui navigation bar background color
navColor: ""
clusterWorkflowTemplates:
# -- Create a ClusterRole and ClusterRoleBinding for the controller to access ClusterWorkflowTemplates.
enabled: true
# -- Extra service accounts to be added to the ClusterRoleBinding
serviceAccounts: []
# - name: my-service-account
# namespace: my-namespace
# -- Extra containers to be added to the controller deployment
extraContainers: []
# -- Extra init containers to be added to the controller deployment
extraInitContainers: []
# -- Workflow retention by number of workflows
retentionPolicy: {}
# completed: 10
# failed: 3
# errored: 3
nodeEvents:
# -- Enable to emit events on node completion.
## This can take up a lot of space in k8s (typically etcd) resulting in errors when trying to create new events:
## "Unable to create audit event: etcdserver: mvcc: database space exceeded"
enabled: true
# -- Configure when the workflow controller runs in a different k8s cluster than the workflow workloads,
# or needs to communicate with the k8s apiserver using an out-of-cluster kubeconfig secret.
# @default -- `{}` (See [values.yaml])
kubeConfig: {}
# # name of the kubeconfig secret, may not be empty when kubeConfig specified
# secretName: kubeconfig-secret
# # key of the kubeconfig secret, may not be empty when kubeConfig specified
# secretKey: kubeconfig
# # mounting path of the kubeconfig secret, default to /kube/config
# mountPath: /kubeconfig/mount/path
# # volume name when mounting the secret, default to kubeconfig
# volumeName: kube-config-volume
# -- Specifies the duration in seconds before a terminating pod is forcefully killed. A zero value indicates that the pod will be forcefully terminated immediately.
# @default -- `30` seconds (Kubernetes default)
podGCGracePeriodSeconds:
# -- The duration before pods in the GC queue get deleted. A zero value indicates that pods will be deleted immediately.
# @default -- `5s` (Argo Workflows default)
podGCDeleteDelayDuration: ""
# mainContainer adds default config for the main container that can be overridden in workflow templates
mainContainer:
# -- imagePullPolicy to apply to Workflow main container. Defaults to `.Values.images.pullPolicy`.
imagePullPolicy: ""
# -- Resource limits and requests for the Workflow main container
resources: {}
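## Illustrative example (placeholder requests/limits; size these for your workloads):
# requests:
#   cpu: 100m
#   memory: 64Mi
# limits:
#   cpu: 500m
#   memory: 512Mi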
# -- Adds environment variables for the Workflow main container
env: []
# -- Adds reference environment variables for the Workflow main container
envFrom: []
# -- sets security context for the Workflow main container
securityContext: {}
# executor controls how the init and wait container should be customized
executor:
image:
# -- Registry to use for the Workflow Executors
registry: quay.io
# -- Repository to use for the Workflow Executors
repository: argoproj/argoexec
# -- Image tag for the workflow executor. Defaults to `.Values.images.tag`.
tag: ""
# -- Image PullPolicy to use for the Workflow Executors. Defaults to `.Values.images.pullPolicy`.
pullPolicy: ""
# -- Resource limits and requests for the Workflow Executors
resources: {}
# -- Passes arguments to the executor processes
args: []
# -- Adds environment variables for the executor.
env: []
# -- sets security context for the executor container
securityContext: {}
server:
# -- Deploy the Argo Server
enabled: true
# -- Value for base href in index.html. Used if the server is running behind a reverse proxy under a subpath other than /.
## This only updates the base URL of resources on the client side;
## it's expected that a proxy server rewrites the request URL and strips this prefix
## https://github.com/argoproj/argo-workflows/issues/716#issuecomment-433213190
baseHref: /
image:
# -- Registry to use for the server
registry: quay.io
# -- Repository to use for the server
repository: argoproj/argocli
# -- Image tag for the Argo Workflows server. Defaults to `.Values.images.tag`.
tag: ""
# -- optional map of annotations to be applied to the ui Deployment
deploymentAnnotations: {}
# -- optional map of annotations to be applied to the ui Pods
podAnnotations: {}
# -- Optional labels to add to the UI pods
podLabels: {}
# -- SecurityContext to set on the server pods
podSecurityContext: {}
rbac:
# -- Adds Role and RoleBinding for the server.
create: true
# -- The server container's securityContext
securityContext:
readOnlyRootFilesystem: false
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# -- Server name string
name: server
# -- Service type for server pods
serviceType: ClusterIP
# -- Service port for server
servicePort: 2746
# -- Service node port
serviceNodePort: # 32746
# -- Service port name
servicePortName: "" # http
# -- Mapping between IP and hostnames that will be injected as entries in the pod's /etc/hosts file
hostAliases: []
# - ip: 10.20.30.40
# hostnames:
# - git.myhostname
serviceAccount:
# -- Create a service account for the server
create: true
# -- Service account name
name: ""
# -- Labels applied to created service account
labels: {}
# -- Annotations applied to created service account
annotations: {}
# -- Annotations to be applied to the UI Service
serviceAnnotations: {}
# -- Optional labels to add to the UI Service
serviceLabels: {}
# -- The class of the load balancer implementation
loadBalancerClass: ""
# -- Static IP address to assign to the server Service when its type is `LoadBalancer`
loadBalancerIP: ""
# -- Source ranges to allow access to service from. Only applies to service type `LoadBalancer`
loadBalancerSourceRanges: []
# -- Resource limits and requests for the server
resources: {}
# -- The number of server pods to run
replicas: 1
# -- The number of revisions to keep.
revisionHistoryLimit: 10
## Argo Server Horizontal Pod Autoscaler
autoscaling:
# -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo Server
enabled: false
# -- Minimum number of replicas for the Argo Server [HPA]
minReplicas: 1
# -- Maximum number of replicas for the Argo Server [HPA]
maxReplicas: 5
# -- Average CPU utilization percentage for the Argo Server [HPA]
targetCPUUtilizationPercentage: 50
# -- Average memory utilization percentage for the Argo Server [HPA]
targetMemoryUtilizationPercentage: 50
# -- Configures the scaling behavior of the target in both Up and Down directions.
# This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
behavior: {}
# scaleDown:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 1
# periodSeconds: 180
# scaleUp:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 2
pdb:
# -- Configure [Pod Disruption Budget] for the server pods
enabled: false
# minAvailable: 1
# maxUnavailable: 1
# -- [Node selector]
nodeSelector:
kubernetes.io/os: linux
# -- [Tolerations] for use with node taints
tolerations: []
# -- Assign custom [affinity] rules
affinity: {}
# -- Assign custom [TopologySpreadConstraints] rules to the argo server
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# -- Leverage a PriorityClass to ensure your pods survive resource shortages
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
# -- Run the argo server in "secure" mode. Configure this value instead of `--secure` in extraArgs.
## See the following documentation for more details on secure mode:
## https://argo-workflows.readthedocs.io/en/stable/tls/
secure: false
# -- Extra environment variables to provide to the argo-server container
extraEnv: []
# - name: FOO
# value: "bar"
# -- Deprecated; use server.authModes instead.
authMode: ""
# -- A list of supported authentication modes. Available values are `server`, `client`, or `sso`. If you provide sso, please configure `.Values.server.sso` as well.
## Ref: https://argo-workflows.readthedocs.io/en/stable/argo-server-auth-mode/
authModes: []
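## Illustrative example (enables SSO plus client auth; `sso` also requires `.Values.server.sso` to be configured):
# - sso
# - client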
# -- Extra arguments to provide to the Argo server binary.
## Ref: https://argo-workflows.readthedocs.io/en/stable/argo-server/#options
extraArgs: []
logging:
# -- Set the logging level (one of: `debug`, `info`, `warn`, `error`)
level: info
# -- Set the glog logging level
globallevel: "0"
# -- Set the logging format (one of: `text`, `json`)
format: "text"
# -- Volume to be mounted in Pods for temporary files.
tmpVolume:
emptyDir: {}
# -- Additional volume mounts to the server main container.
volumeMounts: []
# -- Additional volumes to the server pod.
volumes: []
## Ingress configuration.
# ref: https://kubernetes.io/docs/user-guide/ingress/
ingress:
# -- Enable an ingress resource
enabled: false
# -- Additional ingress annotations
annotations: {}
# -- Additional ingress labels
labels: {}
# -- Defines which ingress controller will implement the resource
ingressClassName: ""
# -- List of ingress hosts
## Hostnames must be provided if Ingress is enabled.
## Secrets must be manually created in the namespace
hosts: []
# - argoworkflows.example.com
# -- List of ingress paths
paths:
- /
# -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
pathType: Prefix
# -- Additional ingress paths
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used)
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
# -- Ingress TLS configuration
tls: []
# - secretName: argoworkflows-example-tls
# hosts:
# - argoworkflows.example.com
## Create a Google BackendConfig for use with the GKE Ingress Controller
## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-configuration#configuring_ingress_features_through_backendconfig_parameters
GKEbackendConfig:
# -- Enable BackendConfig custom resource for Google Kubernetes Engine
enabled: false
# -- [BackendConfigSpec]
spec: {}
# spec:
# iap:
# enabled: true
# oauthclientCredentials:
# secretName: argoworkflows-secret
## Create a Google Managed Certificate for use with the GKE Ingress Controller
## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs
GKEmanagedCertificate:
# -- Enable ManagedCertificate custom resource for Google Kubernetes Engine.
enabled: false
# -- Domains for the Google Managed Certificate
domains:
- argoworkflows.example.com
## Create a Google FrontendConfig Custom Resource, for use with the GKE Ingress Controller
## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters
GKEfrontendConfig:
# -- Enable FrontendConfig custom resource for Google Kubernetes Engine
enabled: false
# -- [FrontendConfigSpec]
spec: {}
# spec:
# redirectToHttps:
# enabled: true
# responseCodeName: RESPONSE_CODE
clusterWorkflowTemplates:
# -- Create a ClusterRole and ClusterRoleBinding for the server to access ClusterWorkflowTemplates.
enabled: true
# -- Give the server permissions to edit ClusterWorkflowTemplates.
enableEditing: true
# SSO configuration when SSO is specified as a server auth mode.
sso:
# -- Create SSO configuration. If you set this to `true`, please also add `sso` to `.Values.server.authModes`.
enabled: false
# -- The root URL of the OIDC identity provider
issuer: https://accounts.google.com
clientId:
# -- Name of secret to retrieve the app OIDC client ID
name: argo-server-sso
# -- Key of secret to retrieve the app OIDC client ID
key: client-id
clientSecret:
# -- Name of a secret to retrieve the app OIDC client secret
name: argo-server-sso
# -- Key of a secret to retrieve the app OIDC client secret
key: client-secret
# -- The OIDC redirect URL. Should be in the form <argo-root-url>/oauth2/callback.
redirectUrl: ""
rbac:
# -- Adds ServiceAccount Policy to server (Cluster)Role.
enabled: true
# -- Whitelist to allow server to fetch Secrets
## When present, restricts secrets the server can read to a given list.
## You can use it to restrict the server to only be able to access the
## service account token secrets that are associated with service accounts
## used for authorization.
secretWhitelist: []
# -- Scopes requested from the SSO ID provider
## The 'groups' scope requests group membership information, which is usually used for authorization decisions.
scopes: []
# - groups
# -- Define how long your login is valid for (in hours)
## If omitted, defaults to 10h.
sessionExpiry: ""
# -- Alternate root URLs that can be included for some OIDC providers
issuerAlias: ""
# -- Override claim name for OIDC groups
customGroupClaimName: ""
# -- Specify the user info endpoint that contains the groups claim
## Configure this if your OIDC provider provides groups information only using the user-info endpoint (e.g. Okta)
userInfoPath: ""
# -- Skip TLS verification for the HTTP client
insecureSkipVerify: false
# -- Filter the groups returned by the OIDC provider
## A logical "OR" is used between each regex in the list
filterGroupsRegex: []
# - ".*argo-wf.*"
# - ".*argo-workflow.*"
# -- Extra containers to be added to the server deployment
extraContainers: []
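## Illustrative example (hypothetical sidecar; any valid Kubernetes container spec can be used):
# - name: metrics-sidecar
#   image: example.com/metrics-sidecar:latest
#   ports:
#     - containerPort: 8080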
# -- Enables init containers to be added to the server deployment
extraInitContainers: []
# -- Specify postStart and preStop lifecycle hooks for server container
lifecycle: {}
# -- terminationGracePeriodSeconds for container lifecycle hook
terminationGracePeriodSeconds: 30
# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
# kind: SecretProviderClass
# metadata:
# name: argo-server-sso
# spec:
# provider: aws
# parameters:
# objects: |
# - objectName: "argo/server/sso"
# objectType: "secretsmanager"
# jmesPath:
# - path: "client_id"
# objectAlias: "client_id"
# - path: "client_secret"
# objectAlias: "client_secret"
# secretObjects:
# - data:
# - key: client_id
# objectName: client_id
# - key: client_secret
# objectName: client_secret
# secretName: argo-server-sso-secrets-store
# type: Opaque
# -- Use static credentials for S3 (e.g. when not using AWS IRSA)
useStaticCredentials: true
artifactRepository:
# -- Archive the main container logs as an artifact
archiveLogs: false
# -- Store artifacts in an S3-compliant object store
# @default -- See [values.yaml]
s3: {}
# # Note the `key` attribute is not the actual secret; it is the key within
# # the associated secret (named by the `name` attribute) that holds the value.
# accessKeySecret:
# name: "{{ .Release.Name }}-minio"
# key: accesskey
# secretKeySecret:
# name: "{{ .Release.Name }}-minio"
# key: secretkey
# # insecure will disable TLS. Primarily used for minio installs not configured with TLS
# insecure: false
# caSecret:
# name: ca-root
# key: cert.pem
# bucket:
# endpoint:
# region:
# roleARN:
# useSDKCreds: true
# encryptionOptions:
# enableEncryption: true
# -- Store artifacts in a GCS object store
# @default -- `{}` (See [values.yaml])
gcs: {}
# bucket: <project>-argo
# keyFormat: "{{ \"{{workflow.namespace}}/{{workflow.name}}/{{pod.name}}\" }}"
# # serviceAccountKeySecret is a secret selector.
# # It references the k8s secret named 'my-gcs-credentials'.
# # This secret is expected to have the key 'serviceAccountKey',
# # containing the base64 encoded credentials
# # to the bucket.
# #
# # If it's running on GKE and Workload Identity is used,
# # serviceAccountKeySecret is not needed.
# serviceAccountKeySecret:
# name: my-gcs-credentials
# key: serviceAccountKey
# -- Store artifacts in Azure Blob Storage
# @default -- `{}` (See [values.yaml])
azure: {}
# endpoint: https://mystorageaccountname.blob.core.windows.net
# container: my-container-name
# blobNameFormat: path/in/container
# # accountKeySecret is a secret selector.
# # It references the k8s secret named 'my-azure-storage-credentials'.
# # This secret is expected to have the key 'account-access-key',
# # containing the base64 encoded credentials to the storage account.
# # If a managed identity has been assigned to the machines running the
# # workflow (e.g., https://docs.microsoft.com/en-us/azure/aks/use-managed-identity)
# # then accountKeySecret is not needed, and useSDKCreds should be
# # set to true instead:
# useSDKCreds: true
# accountKeySecret:
# name: my-azure-storage-credentials
# key: account-access-key
# -- Custom artifact repository configuration.
# Use this for an artifact repository that is not one of the built-in types (s3, gcs, azure)
customArtifactRepository: {}
# artifactory:
# repoUrl: https://artifactory.example.com/raw
# usernameSecret:
# name: artifactory-creds
# key: username
# passwordSecret:
# name: artifactory-creds
# key: password
# -- The section of [artifact repository ref](https://argo-workflows.readthedocs.io/en/stable/artifact-repository-ref/).
# Each map key is the name of a ConfigMap
# @default -- `{}` (See [values.yaml])
artifactRepositoryRef: {}
# # -- 1st ConfigMap
# # If you want to use this config map by default, name it "artifact-repositories".
# # Otherwise, you can provide a reference to a
# # different config map in `artifactRepositoryRef.configMap`.
# artifact-repositories:
# # -- v3.0 and after - if you want to use a specific key, put that key into this annotation.
# annotations:
# workflows.argoproj.io/default-artifact-repository: default-v1-s3-artifact-repository
# # 1st data of configmap. See above artifactRepository or customArtifactRepository.
# default-v1-s3-artifact-repository:
# archiveLogs: false
# s3:
# bucket: my-bucket
# endpoint: minio:9000
# insecure: true
# accessKeySecret:
# name: my-minio-cred
# key: accesskey
# secretKeySecret:
# name: my-minio-cred
# key: secretkey
# # 2nd data
# oss-artifact-repository:
# archiveLogs: false
# oss:
# endpoint: http://oss-cn-zhangjiakou-internal.aliyuncs.com
# bucket: $mybucket
# # accessKeySecret and secretKeySecret are secret selectors.
# # They reference the k8s secret named '$mybucket-credentials'.
# # This secret is expected to have the keys 'accessKey'
# # and 'secretKey', containing the base64 encoded credentials
# # to the bucket.
# accessKeySecret:
# name: $mybucket-credentials
# key: accessKey
# secretKeySecret:
# name: $mybucket-credentials
# key: secretKey
# # 2nd ConfigMap
# another-artifact-repositories:
# annotations:
# workflows.argoproj.io/default-artifact-repository: gcs
# gcs:
# bucket: my-bucket
# keyFormat: prefix/in/bucket/{{workflow.name}}/{{pod.name}}
# serviceAccountKeySecret:
# name: my-gcs-credentials
# key: serviceAccountKey
emissary:
# -- The command/args for each image used in workflows; needed when an image's command is not specified and the emissary executor is used.
## See more: https://argo-workflows.readthedocs.io/en/stable/workflow-executors/#emissary-emissary
images: []
# argoproj/argosay:v2:
# cmd: [/argosay]
# docker/whalesay:latest:
# cmd: [/bin/bash]