# Copyright ToucanToco, SAS. All Rights Reserved.
# SPDX-License-Identifier: Custom-License
global:
## @param global.imageRegistry Override the image registry of every container.
imageRegistry: ''
## @param global.imagePullSecrets Set the image pull secrets of every pod.
imagePullSecrets:
[]
# - myRegistryKeySecretName
## @param global.defaultStorageClass Set the storage class to use for persistent volumes.
defaultStorageClass: ''
## @param global.hostname Public hostname of the Toucan Stack.
## It configures `nginx` Ingress.
## You should also set `curity.runtime.ingress` to expose the auth service (it must be hosted on another domain).
hostname: ''
## @param global.allowedOrigins List of origins allowed to contact the Toucan Stack.
## By default, it already contains the Toucan Stack hostname.
allowedOrigins: []
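# E.g. (illustrative additional origin):
# - https://embed.example.com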
oauth2:
## @param global.oauth2.clientSecrets Configure the client secrets
##
## To generate one: `openssl rand -base64 32`
clientSecrets:
gatewayToucan: ''
toucanAdminManagement: ''
toucanEmbed: ''
toucanImpersonateService: ''
toucanImpersonateServiceEmbed: ''
toucanInfraAdmin: ''
toucanLaputaWebsocket: ''
toucanMicroService: ''
## @param global.oauth2.clientSecrets.existingSecret Name of an existing secret
## to use for Curity OAuth2 clients.
## The entries above are ignored; values are read from this secret instead.
## The secret must contain keys named `curity-<client-name>-client-secret`, e.g. `curity-gateway-toucan-client-secret`.
## The value is evaluated as a template.
existingSecret: ''
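## A minimal sketch for creating such a secret out-of-band (the secret name
## `my-curity-clients` is hypothetical; key names follow the pattern above):
##   kubectl create secret generic my-curity-clients \
##     --from-literal=curity-gateway-toucan-client-secret="$(openssl rand -base64 32)" \
##     --from-literal=curity-toucan-embed-client-secret="$(openssl rand -base64 32)"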
## Configure Curity and Laputa SMTP settings
smtp:
enabled: false
host: ''
port: 587
## @param global.smtp.sender Sender identity (display name and email address).
sender:
displayName: ''
email: '' # Example: '[email protected]'
## @param global.smtp.username SMTP username.
username: ''
## @param global.smtp.password SMTP password.
password: ''
## @param global.smtp.existingSecret Name of an existing secret to use for Curity email
## It must contain the key `toucan-smtp-password`. If set, `password` is ignored.
existingSecret: ''
tls:
enabled: true
## @param global.smtp.tls.type Type of TLS.
## Allowed values: starttls (often port 587), tls (often port 465)
type: starttls
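## An illustrative STARTTLS configuration (host and addresses are placeholders):
## smtp:
##   enabled: true
##   host: smtp.example.com
##   port: 587
##   sender:
##     displayName: Toucan
##     email: [email protected]
##   username: [email protected]
##   tls:
##     enabled: true
##     type: starttls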
toucan:
auth:
## @param global.toucan.auth.adminAccount First admin account.
adminAccount: [email protected]
password: ''
## @param global.toucan.auth.existingSecret Name of an existing secret
## to use for Curity authentication.
## `global.toucan.auth.adminAccount` and `global.toucan.auth.password` are ignored; values are picked up from this secret instead.
## The secret must contain the key `toucan-admin-password`.
## The value is evaluated as a template.
existingSecret: ''
## @param global.postgresql PostgreSQL parameters
postgresql:
auth:
username: 'toucan'
password: ''
database: 'curity'
## @param global.postgresql.auth.existingSecret Name of an existing secret
## to use for PostgreSQL authentication.
## `global.postgresql.auth.username` and `global.postgresql.auth.password` are ignored; values are picked up from this secret instead.
existingSecret: ''
secretKeys:
userPasswordKey: password
databases:
dataset:
name: dataset
schemas:
- dataset
layout:
name: layout
schemas:
- layout
- workspace
spicedb:
name: spicedb
vault:
name: vault
service:
ports:
postgresql: 5432
## @param global.tenantID Defines the name of the profile in the identity provider.
## This is used internally by Toucan and is not meaningful to the end user.
tenantID: 7bf98083-e4ff-4769-baa7-da4fde86d932
## @param global.workspaceID Defines the name of the backend in the authorization server.
## This is used internally by Toucan and is not meaningful to the end user.
workspaceID: 7db50ec9-2585-4bf7-bfcc-4ebdb787ac70
## @param kubeVersion Override Kubernetes version
kubeVersion: ''
## @param apiVersions Override Kubernetes API versions reported by .Capabilities
apiVersions: []
## @param nameOverride String to partially override common.names.name
nameOverride: ''
## @param fullnameOverride String to fully override common.names.fullname
fullnameOverride: ''
## @param namespaceOverride String to fully override common.names.namespace
namespaceOverride: ''
## @param commonLabels Labels to add to all deployed objects
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
clusterDomain: cluster.local
## @section Nginx Subchart Parameters
## Complete values: https://github.com/bitnami/charts/blob/main/bitnami/nginx/values.yaml
nginx:
enabled: true
nameOverride: tucana
## nginx resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# memory: 256Mi
## @section Tucana Front-End Parameters
tucana:
image:
registry: quay.io
repository: toucantoco/frontend
# renovate: image=quay.io/toucantoco/frontend
tag: 'v150.0.1'
digest: ''
pullPolicy: IfNotPresent
## Configure tucana.
## @param tucana.config.tcParams Configure tucana parameters.
## @param tucana.config.embed Configure tucana embed parameters.
config:
tcParams:
## @param tucana.config.tcParams.API_BASEROUTE The public URL of Laputa (back-end).
API_BASEROUTE: https://{{ .Values.global.hostname }}/laputa
## @param tucana.config.tcParams.DATASET_SERVICE_BASEROUTE The public URL of the Dataset Service.
DATASET_SERVICE_BASEROUTE: https://{{ .Values.global.hostname }}/dataset
## @param tucana.config.tcParams.WORKSPACE_SERVICE_BASEROUTE The public URL of the Layout Service.
WORKSPACE_SERVICE_BASEROUTE: https://{{ .Values.global.hostname }}/layout
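## With global.hostname set to e.g. bi.example.com (illustrative), the three routes above render as
## https://bi.example.com/laputa, https://bi.example.com/dataset and https://bi.example.com/layout.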
APP_TEMPLATES_ENABLED: 'true'
TENANT:
AUTH_SERVICE_BASE_URL: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
AUTH_SERVICE_ENDPOINTS:
OAUTH_AUTHORIZE: '/{{ .Values.global.tenantID }}/oauth/oauth-authorize'
OAUTH_TOKEN: '/{{ .Values.global.tenantID }}/oauth/oauth-token'
OAUTH_USERINFO: '/{{ .Values.global.tenantID }}/oauth/oauth-userinfo'
LOGOUT: "/{{ .Values.global.tenantID }}/authentication/authentication/logout"
PUBLIC_OIDC_CLIENT:
CLIENT_ID: workspace-toucan
CODE_CLIENT_ID: workspace-{{ .Values.global.workspaceID }}
REDIRECT_URI: '{{ include "toucan-stack.tucana.baseURL" . }}/oauth/callback'
SCOPE: openid simple_user profile email
AUTH_LOGOUT_REDIRECT_URI: '{{ include "toucan-stack.tucana.baseURL" . }}'
TENANT_ID: '{{ .Values.global.tenantID }}'
WORKSPACE_ID: '{{ .Values.global.workspaceID }}'
embed:
## @param tucana.config.embed.API_BASEROUTE The public URL of Laputa (back-end).
API_BASEROUTE: https://{{ .Values.global.hostname }}/laputa
## @param tucana.config.embed.DATASET_SERVICE_BASEROUTE The public URL of the Dataset Service.
DATASET_SERVICE_BASEROUTE: https://{{ .Values.global.hostname }}/dataset
## @param tucana.config.embed.WORKSPACE_SERVICE_BASEROUTE The public URL of the Layout Service.
WORKSPACE_SERVICE_BASEROUTE: https://{{ .Values.global.hostname }}/layout
BULK_DOWNLOAD_DASHBOARD_DATASETS: enable
DOWNLOAD_DASHBOARD_DATASETS: disable
SANITIZE_HTML: enable
TENANT:
AUTH_SERVICE_BASE_URL: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
AUTH_SERVICE_ENDPOINTS:
OAUTH_AUTHORIZE: '/{{ .Values.global.tenantID }}/oauth/oauth-authorize'
OAUTH_TOKEN: '/{{ .Values.global.tenantID }}/oauth/oauth-token'
OAUTH_USERINFO: '/{{ .Values.global.tenantID }}/oauth/oauth-userinfo'
LOGOUT: "/{{ .Values.global.tenantID }}/authentication/authentication/logout"
PUBLIC_OIDC_CLIENT:
CLIENT_ID: workspace-toucan
CODE_CLIENT_ID: workspace-toucan-code
REDIRECT_URI: '{{ include "toucan-stack.tucana.baseURL" . }}/oauth/callback'
SCOPE: openid simple_user profile email
AUTH_LOGOUT_REDIRECT_URI: '{{ include "toucan-stack.tucana.baseURL" . }}'
TENANT_ID: '{{ .Values.global.tenantID }}'
WORKSPACE_ID: '{{ .Values.global.workspaceID }}'
## @param tucana.existingConfigmap The name of an existing ConfigMap with your custom configuration for tucana
existingConfigmap:
authPlugin:
image:
registry: quay.io
repository: toucantoco/ngx-auth-module
# renovate: image=quay.io/toucantoco/ngx-auth-module
tag: 'v1.6.0-toucan.5-debian12-ngx1.27.4'
digest: ''
pullPolicy: IfNotPresent
utils:
image:
registry: docker.io
repository: library/busybox
# renovate: image=docker.io/library/busybox
tag: '1.37'
digest: ''
pullPolicy: IfNotPresent
args:
- /opt/bitnami/nginx/sbin/nginx
- -g
- 'daemon off;'
- -c
- /opt/bitnami/nginx/conf/nginx.conf
image:
registry: docker.io
repository: bitnami/nginx
tag: 1.27.4
service:
enabled: true
type: ClusterIP
ingress:
enabled: false
selfSigned: false
pathType: ImplementationSpecific
hostname: ''
path: /
annotations:
{}
# nginx.ingress.kubernetes.io/proxy-body-size: 100m # Recommended if you are planning to upload large files.
ingressClassName: ''
tls: false
extraHosts: []
extraPaths: []
extraTls: []
secrets: []
extraRules: []
tls:
enabled: false
# Disable the readiness probe to avoid a chicken-and-egg problem.
readinessProbe:
enabled: false
## @section Laputa Legacy Back-End Parameters
laputa:
enabled: true
## Configure laputa.
## @param laputa.config.tenancy Configure laputa tenancy parameters.
## @param laputa.config.spicedb Configure laputa spicedb parameters.
##
## Use [[ getenv "VAR_NAME" ]] to inject a secret, in combination with laputa.initconfig.secrets.
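## A hypothetical sketch: declaring
##   initconfig:
##     secrets:
##       MY_TOKEN:
##         name: my-secret # existing Kubernetes Secret (hypothetical)
##         key: my-key     # key within that Secret
## exposes MY_TOKEN to the templating step, so '[[ getenv "MY_TOKEN" ]]' in this
## config renders to that secret's value.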
config:
tenancy:
workspace_id: '{{ .Values.global.workspaceID }}'
tenant_id: '{{ .Values.global.tenantID }}'
toucan_oidc_configuration:
algorithms:
- RS512
audiences:
- workspace-toucan
- workspace-toucan-code
- toucan-infra-admin-client
- toucan-embed-client
- toucan-impersonate-service-client
jwks:
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/{{ .Values.global.tenantID }}/oauth/oauth-anonymous/jwks'
cache_ttl: 3600
min_time_between_attempts: 60
toucan_user_management:
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/{{ .Values.global.tenantID }}/user-management/graphql/admin'
vault:
secret_path: 'http://{{ include "vault.server.fullname" .Subcharts.vault }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.vault.server.service.general.ports.http }}/v1/toucan_oauthapp_tenant/self/{{ .Values.global.tenantID }}/toucan-admin-management-client'
token_header: X-Vault-Token
token: '[[ getenv "VAULT_TOKEN" ]]'
toucan_micro_service_client:
vault:
secret_path: 'http://{{ include "vault.server.fullname" .Subcharts.vault }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.vault.server.service.general.ports.http }}/v1/toucan_oauthapp_tenant/self/{{ .Values.global.tenantID }}/toucan-micro-service-client'
token_header: X-Vault-Token
token: '[[ getenv "VAULT_TOKEN" ]]'
toucan_websocket_client:
client_id: toucan-laputa-websocket-client
client_secret: '[[ getenv "TOUCAN_LAPUTA_WEBSOCKET_CLIENT_SECRET" ]]'
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/{{ .Values.global.tenantID }}/oauth/oauth-introspect'
toucan_public_embed_client:
client_id: toucan-embed-client
jwt_assertion_audience: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/{{ .Values.global.tenantID }}/oauth/oauth-anonymous'
jwt_assertion_issuer: '{{ .Values.global.tenantID }}-embed'
curity_admin_api_configuration:
username: admin
password: '[[ getenv "AUTHN_ADMIN_PASSWORD" ]]'
embed_client_public_key_id: 'embed-verification-key'
curity_admin_url_prefix: http://{{ include "toucan-stack.curity.admin.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.curity.admin.service.ports.admin }}/admin/api/restconf/data
oauth_profile_id: '{{ .Values.global.tenantID }}-oauth'
impersonate_token_service_configuration:
url: 'http://{{ include "toucan-stack.impersonate.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.impersonate.service.ports.http }}'
spicedb:
url: '{{ include "toucan-stack.spicedb.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.spicedb.service.ports.grpc }}'
preshared_key: '[[ getenv "SPICEDB_PRESHARED_KEY" ]]'
ca_cert_path: '/spicedb-certs/ca.crt'
## @param laputa.config.common Direct environment variables used by Toucan.
##
## To set secrets securely, use laputa.extraEnvVars instead.
common:
# Service variables
TOUCAN_NGINX_SUPERVISOR: 'on'
# Main variables
TOUCAN_INSTANCE_NAME: '{{ .Values.global.hostname }}'
TOUCAN_FRONTEND_URLS: '["{{ include "toucan-stack.tucana.baseURL" . }}"]'
# Mongo variables
TOUCAN_MONGODB_HOST: '{{ template "toucan-stack.mongodb.fullname" . }}.{{ include "common.names.namespace" . }}.svc'
TOUCAN_MONGODB_PORT: '{{ .Values.mongodb.service.ports.mongo }}'
TOUCAN_MONGODB_USER: 'app'
TOUCAN_MONGODB_READONLY_USER: 'app_readonly'
TOUCAN_MONGODB_ADMIN_USER: 'admin'
# Redis variables
TOUCAN_REDIS_HOST: '{{ printf "%s-master" (include "common.names.fullname" (index .Subcharts "laputa-redis")) }}.{{ include "common.names.namespace" . }}.svc'
TOUCAN_REDIS_PORT: '{{ (index .Values "laputa-redis" "master" "service" "ports" "redis") }}'
# Misc variables
TOUCAN_WHITE_LABEL: 'enable'
TOUCAN_PDF_REPORT: 'enable'
TOUCAN_DATAWALL: 'enable'
TOUCAN_ALERTS: 'enable'
# Prepared Datasets
TOUCAN_QUERY_CACHE: 'enable'
# Gotenberg screenshot service
TOUCAN_GOTENBERG_URL: 'http://{{ include "gotenberg.fullname" .Subcharts.gotenberg }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.gotenberg.service.port }}'
TOUCAN_GOTENBERG_FLAG: 'experimental'
TOUCAN_LOG_LEVEL: 'INFO'
# Layout service
TOUCAN_LAYOUT_SERVICE_FLAG: 'enable'
TOUCAN_LAYOUT_SERVICE_URL: 'http://{{ include "toucan-stack.layout.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.layout.service.ports.http }}'
# Dataset service
TOUCAN_DATASET_SERVICE: 'enable'
TOUCAN_DATASET_SERVICE_URL: 'http://{{ include "toucan-stack.dataset.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.dataset.service.ports.http }}'
TOUCAN_TENANCY_FLAG: 'enable'
## @param laputa.secrets.secrets Configure Laputa required secrets.
##
## To generate one: `openssl rand -base64 32`
secrets:
dbEncryptionSecret: ''
jwtSecretKey: ''
## @param laputa.secrets.existingSecret Name of an existing secret to use for Laputa secrets.
## The entries above are ignored; values are read from this secret instead.
## The secret must contain the keys `laputa-db-encryption-secret` and `laputa-jwt-secret-key`.
## The value is evaluated as a template.
existingSecret: ''
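## Illustrative out-of-band creation (the secret name is hypothetical):
##   kubectl create secret generic my-laputa-secrets \
##     --from-literal=laputa-db-encryption-secret="$(openssl rand -base64 32)" \
##     --from-literal=laputa-jwt-secret-key="$(openssl rand -base64 32)"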
image:
registry: quay.io
repository: toucantoco/backend
# renovate: image=quay.io/toucantoco/backend
tag: 'v150.0.4'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param laputa.replicaCount Number of laputa replicas to deploy
replicaCount: 1
containerPorts:
http: 80
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## laputa resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 1
# memory: 4Gi
# limits:
# memory: 12Gi
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1000
containerSecurityContext:
enabled: false
seLinuxOptions: {}
readOnlyRootFilesystem: false # Laputa generates a file at /app/config.yml
privileged: false
capabilities:
drop: ['ALL']
add: ['NET_BIND_SERVICE']
seccompProfile:
type: 'RuntimeDefault'
## Configure the initconfig container.
initconfig:
image:
registry: docker.io
repository: hairyhenderson/gomplate
# renovate: image=docker.io/hairyhenderson/gomplate
tag: v4.3.2-alpine
pullPolicy: IfNotPresent
pullSecrets: []
## @param laputa.initconfig.secrets Dictionary of secrets to add to the initconfig container
secrets:
SPICEDB_PRESHARED_KEY:
key: spicedb-preshared-key
name: '{{ include "toucan-stack.spicedb.secretName" . }}'
AUTHN_ADMIN_PASSWORD:
key: curity-admin-password
name: '{{ include "toucan-stack.curity.config.secretName" .}}'
TOUCAN_LAPUTA_WEBSOCKET_CLIENT_SECRET:
key: curity-toucan-laputa-websocket-client-secret
name: '{{ include "toucan-stack.curity.oauth2.secretName" . }}'
VAULT_TOKEN:
key: vault-token
name: '{{- include "toucan-stack.vault.oauthapp.secretName" . -}}'
## @param laputa.initconfig.extraEnvVars Extra environment variables to add to the initconfig container
extraEnvVars: {}
## @param laputa.initconfig.extraEnvVarsCM Name of an existing ConfigMap containing extra env vars for the initconfig container
extraEnvVarsCM: ''
## @param laputa.initconfig.extraEnvVarsSecret Name of an existing Secret containing extra env vars for the initconfig container
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
## Configure laputa logging container
logging:
image:
registry: docker.io
repository: library/busybox
# renovate: image=docker.io/library/busybox
tag: '1.37'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param laputa.existingConfigmap The name of an existing ConfigMap with your custom configuration for laputa
##
existingConfigmap:
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
statefulsetAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param laputa.podAffinityPreset Pod affinity preset. Ignored if `laputa.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param laputa.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `laputa.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param laputa.nodeAffinityPreset.type Node affinity preset type. Ignored if `laputa.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param laputa.nodeAffinityPreset.key Node label key to match. Ignored if `laputa.affinity` is set
key: ''
## @param laputa.nodeAffinityPreset.values Node label values to match. Ignored if `laputa.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param laputa.affinity Affinity for laputa pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `laputa.podAffinityPreset`, `laputa.podAntiAffinityPreset`, and `laputa.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
extraEnvVars:
- name: TOUCAN_MONGODB_PASS
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.mongodb.auth.secretName" . }}'
key: mongodb-app-password
- name: TOUCAN_MONGODB_READONLY_PASS
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.mongodb.auth.secretName" . }}'
key: mongodb-app-readonly-password
- name: TOUCAN_MONGODB_ADMIN_PASS
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.mongodb.auth.secretName" . }}'
key: mongodb-root-password
- name: TOUCAN_REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "redis.secretName" (index .Subcharts "laputa-redis") }}'
key: redis-password
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes:
- name: spicedb-certs
secret:
secretName: '{{ template "toucan-stack.spicedb.tls.secretName" . }}'
items:
- key: ca.crt
path: ca.crt
extraVolumeMounts:
- name: spicedb-certs
mountPath: /spicedb-certs
sidecars: []
initContainers: []
pdb:
create: false
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
service:
type: ClusterIP
ports:
http: 80
nodePorts:
http: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## laputa ingress parameters
## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/
##
## Laputa is a protected resource! Use `nginx.ingress` instead!
ingress:
enabled: false
pathType: ImplementationSpecific
apiVersion: ''
hostname: laputa.local
ingressClassName: ''
path: /
annotations: {}
tls: false
selfSigned: false
extraHosts: []
extraPaths: []
extraTls: []
secrets: []
extraRules: []
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
persistence:
enabled: true
mountPath: /app/storage
subPath: ''
## @param laputa.persistence.storageClass Storage class of backing PVC
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
storageClass: ''
annotations: {}
accessModes:
- ReadWriteOnce
size: 8Gi
existingClaim: ''
selector: {}
dataSource: {}
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## @section Layout Parameters
layout:
enabled: true
## Configure layout.
## @param layout.config Configure layout parameters.
##
## Use [[ getenv "VAR_NAME" ]] to inject a secret, in combination with layout.initconfig.secrets.
config:
auth:
disabled: false
jwks:
algorithms:
- RS512
audiences:
- workspace-toucan
- workspace-toucan-code
- toucan-infra-admin-client
- toucan-micro-service-client
- toucan-impersonate-service-client
- toucan-embed-client
- toucan-impersonate-service-embed-client
max_ttl_seconds: 43200000
min_ttl_seconds: 300000
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/__TENANT_ID__/oauth/oauth-anonymous/jwks'
database_postgres:
url: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "LAYOUT_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.layout.database.name" . }}?schema=layout'
url_admin: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "LAYOUT_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.layout.database.name" . }}?schema=layout'
database_redis:
cache_ttl_seconds: 3600000
host: '{{ printf "%s-master" (include "common.names.fullname" (index .Subcharts "layout-redis")) }}'
port: 6379
database_spicedb:
ca_cert_path: '/spicedb-certs/ca.crt'
preshared_key: '[[ getenv "SPICEDB_PRESHARED_KEY" ]]'
url: '{{ include "toucan-stack.spicedb.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.spicedb.service.ports.grpc }}'
environment: production
log_level: INFO
profiling:
enabled: false
application_name: layout-service
basic_auth_enabled: true
basic_auth_password: '[[ getenv "PYROSCOPE_PASSWORD" | default "password" ]]'
basic_auth_username: toucantoco
sample_rate: 100
server_address: ''
tags:
{}
# env: PROJECT_NAME
specific:
root_url: 'http://{{ include "toucan-stack.layout.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.layout.service.ports.http }}'
dataset_service_url: 'http://{{ include "toucan-stack.dataset.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.dataset.service.ports.http }}'
sentry_dsn: null
workspace_database_postgres:
url: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "LAYOUT_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.layout.database.name" . }}?schema=workspace'
url_admin: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "LAYOUT_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.layout.database.name" . }}?schema=workspace'
toucan_user_management:
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/__TENANT_ID__/user-management/graphql/admin'
vault:
secret_path: 'http://{{ include "vault.server.fullname" .Subcharts.vault }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.vault.server.service.general.ports.http }}/v1/toucan_oauthapp_tenant/self/__TENANT_ID__/toucan-admin-management-client'
token_header: 'X-Vault-Token'
token: '[[ getenv "VAULT_TOKEN" ]]'
user_provisioning_shared_secret: '[[ getenv "USER_PROVISIONING_SHARED_SECRET" ]]'
tracing:
attributes:
service_name: layout-service
enabled: false
http:
authentication: false
endpoint: ''
insecure: true
url_path: /v1/traces
sampling: parentbase_always_off
image:
registry: quay.io
repository: toucantoco/layout-service
# renovate: image=quay.io/toucantoco/layout-service
tag: 'v1.16.0'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param layout.replicaCount Number of layout replicas to deploy
replicaCount: 1
containerPorts:
http: 3000
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## layout resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 386Mi
# limits:
# memory: 1Gi
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
fsGroup: 1000
runAsUser: 1000
runAsGroup: 1000
supplementalGroups: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
## Configure the initconfig container.
initconfig:
image:
registry: docker.io
repository: hairyhenderson/gomplate
# renovate: image=docker.io/hairyhenderson/gomplate
tag: v4.3.2-alpine
pullPolicy: IfNotPresent
pullSecrets: []
## @param layout.initconfig.secrets Dictionary of secrets to add to the initconfig container
secrets:
SPICEDB_PRESHARED_KEY:
name: '{{ include "toucan-stack.spicedb.secretName" . }}'
key: spicedb-preshared-key
LAYOUT_POSTGRESQL_PASSWORD:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
VAULT_TOKEN:
name: '{{- include "toucan-stack.vault.oauthapp.secretName" . -}}'
key: vault-token
USER_PROVISIONING_SHARED_SECRET:
name: '{{ include "toucan-stack.layout.fullname" . }}'
key: layout-user-provisioning-shared-secret
extraEnvVars: {}
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
## Configure the migration container.
migration:
enabled: true
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
## @param layout.migration.extraVolumeMounts Optionally specify an extra list of volumeMounts for the layout migration container
extraVolumeMounts:
- name: spicedb-certs
mountPath: /spicedb-certs
## @param layout.existingConfigmap The name of an existing ConfigMap with your custom configuration for layout
existingConfigmap:
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
deploymentAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param layout.podAffinityPreset Pod affinity preset. Ignored if `layout.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param layout.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `layout.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param layout.nodeAffinityPreset.type Node affinity preset type. Ignored if `layout.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param layout.nodeAffinityPreset.key Node label key to match. Ignored if `layout.affinity` is set
key: ''
## @param layout.nodeAffinityPreset.values Node label values to match. Ignored if `layout.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param layout.affinity Affinity for layout pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `layout.podAffinityPreset`, `layout.podAntiAffinityPreset`, and `layout.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes:
- name: spicedb-certs
secret:
secretName: '{{ template "toucan-stack.spicedb.tls.secretName" . }}'
items:
- key: ca.crt
path: ca.crt
extraVolumeMounts:
- name: spicedb-certs
mountPath: /spicedb-certs
sidecars: []
initContainers: []
pdb:
create: true
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
hpa:
enabled: false
minReplicas: ''
maxReplicas: ''
targetCPU: ''
targetMemory: ''
service:
type: ClusterIP
ports:
http: 3000
nodePorts:
http: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## layout ingress parameters
## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/
##
## Layout is a protected resource! Use `nginx.ingress` instead!
ingress:
enabled: false
pathType: ImplementationSpecific
apiVersion: ''
hostname: layout.local
ingressClassName: ''
path: /
annotations: {}
tls: false
selfSigned: false
extraHosts: []
extraPaths: []
extraTls: []
secrets: []
extraRules: []
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## Prometheus metrics
metrics:
enabled: true
## Prometheus Operator ServiceMonitor configuration
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.ServiceMonitor
serviceMonitor:
enabled: false
namespace: ''
annotations: {}
labels: {}
jobLabel: ''
honorLabels: false
interval: ''
scrapeTimeout: ''
metricRelabelings: []
relabelings: []
selector: {}
## @section Dataset Parameters
dataset:
enabled: true
## Configure dataset.
## @param dataset.config Configure dataset parameters.
##
## Use [[ getenv "VAR_NAME" ]] to inject a secret, in combination with dataset.initconfig.secrets.
config:
auth:
disabled: false
jwks:
algorithms:
- RS512
audiences:
- workspace-toucan
- workspace-toucan-code
- toucan-infra-admin-client
- toucan-micro-service-client
- toucan-impersonate-service-client
- toucan-embed-client
- toucan-impersonate-service-embed-client
max_ttl_seconds: 43200
min_ttl_seconds: 300
refresh_timeout_seconds: 2
uri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/__TENANT_ID__/oauth/oauth-anonymous/jwks'
database_postgres:
connection_pool_size: 20
schema: dataset
url: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "DATASET_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.dataset.database.name" . }}'
url_admin: 'postgresql://{{ include "toucan-stack.database.user" . }}:[[ getenv "DATASET_POSTGRESQL_PASSWORD" ]]@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.dataset.database.name" . }}'
database_spicedb:
ca_cert_path: '/spicedb-certs/ca.crt'
preshared_key: '[[ getenv "SPICEDB_PRESHARED_KEY" ]]'
url: '{{ include "toucan-stack.spicedb.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.spicedb.service.ports.grpc }}'
environment: production
profiling:
enabled: false
application_name: dataset-service
basic_auth_enabled: true
basic_auth_password: '[[ getenv "PYROSCOPE_PASSWORD" | default "password" ]]'
basic_auth_username: toucantoco
sample_rate: 100
server_address: ''
tags:
{}
# env: change_me
specific:
debug_log_routes:
- /
- /metrics
log_structured_json_format: true
no_auth_routes:
- /
- /metrics
sentry_dsn: null
testing: false
workspace_service_url: 'http://{{ include "toucan-stack.layout.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.layout.service.ports.http }}'
tracing:
attributes:
service_name: dataset-service
enabled: false
http:
authentication: false
endpoint: ''
insecure: true
url_path: /v1/traces
sampling: parentbase_always_off
vault:
auth_method:
kubernetes_auth:
auth_url: http://{{ include "vault.server.fullname" .Subcharts.vault }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.vault.server.service.general.ports.http }}/v1/auth/kubernetes/login
service_account_token_path: /var/run/secrets/kubernetes.io/serviceaccount/token
base_url: http://{{ include "vault.server.fullname" .Subcharts.vault }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.vault.server.service.general.ports.http }}
request_timeout_seconds: 10
token_header_name: X-Vault-Token
## @param dataset.enableKubernetesAuth Allow dataset to use Kubernetes auth to authenticate with HashiCorp Vault
enableKubernetesAuth: true
image:
registry: quay.io
repository: toucantoco/dataset_service
# renovate: image=quay.io/toucantoco/dataset_service
tag: 'v1.16.1'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param dataset.replicaCount Number of dataset replicas to deploy
replicaCount: 1
containerPorts:
http: 3000
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## dataset resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 1Gi
# limits:
# memory: 3Gi
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1000
runAsUser: 1000
runAsGroup: 1000
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
## Configure the initconfig container.
initconfig:
image:
registry: docker.io
repository: hairyhenderson/gomplate
# renovate: image=docker.io/hairyhenderson/gomplate
tag: v4.3.2-alpine
pullPolicy: IfNotPresent
pullSecrets: []
## @param dataset.initconfig.secrets Dictionary of secrets to add to the initconfig container
secrets:
SPICEDB_PRESHARED_KEY:
key: spicedb-preshared-key
name: '{{ include "toucan-stack.spicedb.secretName" . }}'
DATASET_POSTGRESQL_PASSWORD:
key: '{{ include "toucan-stack.database.keyName" . }}'
name: '{{ include "toucan-stack.database.secretName" . }}'
extraEnvVars: {}
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
## @param dataset.gunicornConfig Configure gunicorn parameters.
gunicornConfig: |-
import os
import multiprocessing
cpu_limit = float(os.getenv("CPU_LIMIT", multiprocessing.cpu_count())) # Default to all available CPUs
cpu_per_worker = float(os.getenv("CPU_PER_WORKER", 0.5))
workers = max(int(cpu_limit / cpu_per_worker), 12)  # At least 12 workers; beyond that, one worker per cpu_per_worker share (2 per CPU by default)
worker_class = 'uvicorn.workers.UvicornWorker'
keepalive = 10
bind = '0.0.0.0:{{ .Values.dataset.containerPorts.http }}'
wsgi_app = 'dataset_service.main:app'
max_requests_jitter = 200
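## With the defaults above, a pod limited to 2 CPUs gets max(int(2 / 0.5), 12) = 12 workers,
## while 8 CPUs yield 16. CPU_LIMIT and CPU_PER_WORKER can be supplied via dataset.extraEnvVars.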
## Configure the migration container.
migration:
enabled: true
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
extraVolumeMounts:
- name: spicedb-certs
mountPath: /spicedb-certs
## @param dataset.existingConfigmap The name of an existing ConfigMap with your custom configuration for dataset
existingConfigmap:
## @param dataset.existingGunicornConfigmap The name of an existing ConfigMap with your custom configuration for gunicorn
existingGunicornConfigmap:
command: []
args: []
automountServiceAccountToken: true
hostAliases: []
deploymentAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param dataset.podAffinityPreset Pod affinity preset. Ignored if `dataset.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param dataset.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `dataset.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param dataset.nodeAffinityPreset.type Node affinity preset type. Ignored if `dataset.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param dataset.nodeAffinityPreset.key Node label key to match. Ignored if `dataset.affinity` is set
key: ''
## @param dataset.nodeAffinityPreset.values Node label values to match. Ignored if `dataset.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param dataset.affinity Affinity for dataset pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `dataset.podAffinityPreset`, `dataset.podAntiAffinityPreset`, and `dataset.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes:
- name: spicedb-certs
secret:
secretName: '{{ template "toucan-stack.spicedb.tls.secretName" . }}'
items:
- key: ca.crt
path: ca.crt
extraVolumeMounts:
- name: spicedb-certs
mountPath: /spicedb-certs
sidecars: []
initContainers: []
pdb:
create: true
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
##
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
hpa:
enabled: false
minReplicas: ''
maxReplicas: ''
targetCPU: ''
targetMemory: ''
service:
type: ClusterIP
ports:
http: 3000
nodePorts:
http: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## dataset ingress parameters
## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/
##
## Dataset is a protected resource! Use `nginx.ingress` instead!
ingress:
enabled: false
pathType: ImplementationSpecific
apiVersion: ''
hostname: dataset.local
ingressClassName: ''
path: /
annotations: {}
tls: false
selfSigned: false
extraHosts: []
extraPaths: []
extraTls: []
secrets: []
extraRules: []
## ServiceAccount configuration
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## Prometheus metrics
metrics:
enabled: true
## Prometheus Operator ServiceMonitor configuration
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.ServiceMonitor
##
serviceMonitor:
enabled: false
namespace: ''
annotations: {}
labels: {}
jobLabel: ''
honorLabels: false
interval: ''
scrapeTimeout: ''
metricRelabelings: []
relabelings: []
selector: {}
## @section Impersonate Service Parameters
impersonate:
enabled: true
## Configure impersonate.
## @param impersonate.config Configure impersonate parameters.
##
## Use [[ getenv "VAR_NAME" ]] to inject a secret, in combination with impersonate.initconfig.secrets.
config:
redis:
## Redis address
addr: '{{ printf "%s-master" (include "common.names.fullname" (index .Subcharts "impersonate-redis")) }}:{{ (index .Values "impersonate-redis" "master" "service" "ports" "redis") }}'
oidcImpersonateTokenClient:
## The client ID of the OIDC client
clientId: 'toucan-impersonate-service-client'
## The client secret of the OIDC client
clientSecret: '[[ getenv "TOUCAN_IMPERSONATE_SERVICE_CLIENT_SECRET" ]]'
## Token uri of the OIDC provider
tokenEndpoint: '/oauth/oauth-token'
## The OIDC provider's url
providerUrl: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
## The OIDC impersonate user grant type
userImpersonateGrantType: 'urn:ietf:params:oauth:grant-type:jwt-bearer'
## Path to the RSA private key used to sign the JWT assertion token
jwtAssertionPrivateKeyPath: '/keys/impersonate.key'
## The OIDC provider's issuer suffix (appended to the tenant ID)
providerIssuer: '-toucan-impersonate-service-client'
## JWT assertion token expiration in seconds
tokenexpirationinseconds: 3600
## Scope used to impersonate a user
impersonateUserScope: 'impersonate simple_user email profile'
redisDatabase: 1
oidcEmbedTokenClient:
## The client ID of the OIDC client
clientId: 'toucan-impersonate-service-embed-client'
## The client secret of the OIDC client
clientSecret: '[[ getenv "TOUCAN_IMPERSONATE_SERVICE_EMBED_CLIENT_SECRET" ]]'
## Token uri of the OIDC provider
tokenEndpoint: '/oauth/oauth-token'
## The OIDC provider's url
providerUrl: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
## The OIDC embed user grant type
embedGrantType: 'urn:ietf:params:oauth:grant-type:jwt-bearer'
## Path to the RSA private key used to sign the JWT assertion token
jwtAssertionPrivateKeyPath: '/keys/embed.key'
## The OIDC provider's issuer suffix (appended to the tenant ID)
providerIssuer: '-toucan-impersonate-service-embed-client'
## JWT assertion token expiration in seconds
tokenexpirationinseconds: 3600
## Scope used for embed users
EmbedUserScope: 'embed'
## Redis Database
redisDatabase: 2
jwksConfiguration:
## The JWKS provider's url
providerUrl: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
## The OIDC provider's jwks endpoint
jwksProviderEndpoint: '/oauth/oauth-anonymous/jwks'
## Supported algorithms
jwksAlgorithm:
- 'RS512'
## Supported audiences
jwksAudience: 'toucan-micro-service-client'
## Max age of a JWKS entry in the cache
jwksCacheMaxAgeSeconds: 43200
## Max time between two JWKS fetches when a key is not found in the cache
jwksCacheMaxStaleSeconds: 300
## Trusted issuer uri to validate the jwt issuer
jwksTrustedIssuerUri: '{{ include "toucan-stack.curity.runtime.baseURL" . }}'
jwksTrusterIssuerEndpoint: '/oauth/oauth-anonymous'
redisDatabase: 0
tracing:
## Enable tracing
enable: false
## Sampling mode: always_on, always_off, trace_id_ratio, trace_id_modulo
sampling: always_on
## Sampling rate
SampleRate: 1
## Http exporter
http:
## Endpoint to Grafana Tempo
endpoint: ''
## Url path
urlPath: '/otlp/v1/traces'
## Insecure (http or https)
insecure: false
## Authentication
authentication: true
user: ''
password: '[[ getenv "TRACING_PASSWORD" ]]'
## Attributes
attributes:
## Service name
serviceName: 'impersonate-token-service'
PyroscopeProfiling:
## Enable Pyroscope
enable: false
## Application name
applicationName: impersonate-token
## Pyroscope server address
serverAddress: ''
## Sample rate
sampleRate: 100
## basicAuth
basicAuthEnabled: false
## basic Auth Username
basicAuthUsername: ''
## basic Auth Password
basicAuthPassword: '[[ getenv "PYROSCOPE_PASSWORD" | default "password" ]]'
logging:
debug: false
## @param impersonate.keys RS256 private key used for client credentials authentication.
## The impersonate service uses the RS256 private key to sign the jwt assertion token
## and fetch an API key from Curity (the auth service).
##
## If not set, predefined demo keys will be used.
## Keys are mounted at /keys.
##
## To generate one: `openssl genrsa -traditional -out - 2048`.
## impersonatePrivateKey: |-
## -----BEGIN RSA PRIVATE KEY-----
## -----END RSA PRIVATE KEY-----
##
keys:
impersonatePrivateKey: ''
## Note: embedPrivateKey is optional and can be overridden at runtime.
embedPrivateKey: ''
## @param impersonate.keys.existingSecret Name of an existing secret to use for impersonate credentials
## `impersonate.keys.impersonatePrivateKey` and `impersonate.keys.embedPrivateKey` are ignored;
## values are read from this secret instead.
## The secret must contain the keys `impersonate-private-key` and `embed-private-key`.
## The value is evaluated as a template.
##
existingSecret: ''
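## Illustrative out-of-band creation from PEM files (secret and file names are placeholders):
##   kubectl create secret generic my-impersonate-keys \
##     --from-file=impersonate-private-key=./impersonate.key \
##     --from-file=embed-private-key=./embed.key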
image:
registry: quay.io
repository: toucantoco/impersonate-token-service
# renovate: image=quay.io/toucantoco/impersonate-token-service
tag: 'v0.3.1'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param impersonate.replicaCount Number of impersonate replicas to deploy
replicaCount: 1
containerPorts:
http: 8080
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## impersonate resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 50m
# memory: 64Mi
# limits:
# memory: 256Mi
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
## Configure the initconfig container.
initconfig:
image:
registry: docker.io
repository: hairyhenderson/gomplate
# renovate: image=docker.io/hairyhenderson/gomplate
tag: v4.3.2-alpine
pullPolicy: IfNotPresent
pullSecrets: []
## @param impersonate.initconfig.secrets Dictionary of secrets to add to the initconfig container
secrets:
TOUCAN_IMPERSONATE_SERVICE_CLIENT_SECRET:
key: curity-toucan-impersonate-service-client-secret
name: '{{ include "toucan-stack.curity.oauth2.secretName" . }}'
TOUCAN_IMPERSONATE_SERVICE_EMBED_CLIENT_SECRET:
key: curity-toucan-impersonate-service-embed-client-secret
name: '{{ include "toucan-stack.curity.oauth2.secretName" . }}'
extraEnvVars: {}
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 0
## @param impersonate.existingConfigmap The name of an existing ConfigMap with your custom configuration for impersonate
existingConfigmap:
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
deploymentAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param impersonate.podAffinityPreset Pod affinity preset. Ignored if `impersonate.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param impersonate.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `impersonate.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param impersonate.nodeAffinityPreset.type Node affinity preset type. Ignored if `impersonate.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param impersonate.nodeAffinityPreset.key Node label key to match. Ignored if `impersonate.affinity` is set
key: ''
## @param impersonate.nodeAffinityPreset.values Node label values to match. Ignored if `impersonate.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param impersonate.affinity Affinity for impersonate pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `impersonate.podAffinityPreset`, `impersonate.podAntiAffinityPreset`, and `impersonate.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes: []
extraVolumeMounts: []
sidecars: []
initContainers: []
pdb:
create: true
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
hpa:
enabled: false
minReplicas: ''
maxReplicas: ''
targetCPU: ''
targetMemory: ''
service:
type: ClusterIP
ports:
http: 8080
nodePorts:
http: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## impersonate ingress parameters
## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/
##
## Impersonate is not meant to be accessible from the outside.
ingress:
enabled: false
pathType: ImplementationSpecific
apiVersion: ''
hostname: impersonate.local
ingressClassName: ''
path: /
annotations: {}
tls: false
selfSigned: false
extraHosts: []
extraPaths: []
extraTls: []
secrets: []
extraRules: []
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## @section SpiceDB Parameters
spicedb:
enabled: true
## Configure spicedb.
config:
datastore:
engine: postgres
## @param spicedb.config.datastore.uri URI to connect to the database.
##
## Provide the $(PG_PASSWORD) secret through `spicedb.extraEnvVars`; see the example below.
uri: 'postgresql://{{ include "toucan-stack.database.user" . }}:$(PG_PASSWORD)@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.spicedb.database.name" . }}'
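## For example (hypothetical secret name), $(PG_PASSWORD) referenced above can be provided via:
## extraEnvVars:
##   - name: PG_PASSWORD
##     valueFrom:
##       secretKeyRef:
##         name: my-postgres-secret
##         key: password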
auth:
## @param spicedb.config.auth.presharedKey Preshared key to run SpiceDB
presharedKey: ''
## @param spicedb.config.auth.existingSecret Name of an existing secret to use for SpiceDB credentials
## `spicedb.config.auth.presharedKey` is ignored; its value is read from this secret instead.
## The secret must contain the key `spicedb-preshared-key`.
## The value is evaluated as a template.
existingSecret: ''
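## Illustrative out-of-band creation (the secret name is hypothetical):
##   kubectl create secret generic my-spicedb-auth \
##     --from-literal=spicedb-preshared-key="$(openssl rand -base64 32)"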
## %%% START SPICEDB_SCHEMA %%%
schema: |-
definition user {}
/**
* Represents an embed token with roles & privileges specified in a JSON object.
* Used with wildcard id, and checked with caveats, with the JSON embed context as parameter.
*/
definition embed_token {}
definition toucan {
relation admin: user
permission super_admin = admin
}
definition toucan_sso_group {
relation member: user
}
definition group {
relation parent: tenant
relation admin: user
relation member: user
permission edit = admin
}
definition tenant {
relation toucan: toucan
relation admin: user | group#member
relation member: user | group#member
permission super_admin = admin & toucan->super_admin
permission edit = admin
}
definition workspace {
relation parent: tenant
/**
* The following relations can be written for users, groups (applied to group members), and embed tokens.
* A workspace should always have all these relations to embed token wildcards, so that embed tokens
* can be used, with the correct embed context, to access or edit this workspace.
*/
relation admin: user | group#member | toucan_sso_group#member | embed_token:* with embed_workspace_admin
relation viewer: user | group#member | embed_token:* with embed_workspace_viewer
permission edit = admin + parent->edit
permission access = viewer + edit
}
definition app {
relation parent: workspace
/**
* The following relations can be written for users, groups (applied to group members), and embed tokens.
* An app should always have all these relations to embed token wildcards, so that embed tokens
* can be used, with the correct embed context, to access this app.
*/
relation owner: user | group#member | embed_token:* with embed_app_owner
relation editor: user | group#member | embed_token:* with embed_app_editor
relation data_editor: user | group#member | embed_token:* with embed_app_data_editor
relation design_editor: user | group#member | embed_token:* with embed_app_design_editor
relation validator: user | group#member | embed_token:* with embed_app_validator
relation business_specialist: user | group#member | embed_token:* with embed_app_business_specialist
relation viewer: user | group#member | embed_token:* with embed_app_viewer
permission own = owner + parent->edit
permission edit = owner + editor + parent->edit
permission edit_data = data_editor + edit
permission edit_design = design_editor + edit
permission validate = validator + edit_data + edit_design
permission enrich = edit_design + business_specialist
permission view = viewer + validate + enrich
}
definition pdf_report {
relation owner: user
relation viewer: user | app#view
relation editor: user | app#edit_design | app#view
permission view = viewer + editor + owner
permission edit = editor + owner
permission delete = owner + editor
permission share_access = owner + editor
}
definition dashboard {
relation owner: user
relation viewer: user | app#view
relation editor: user | app#edit_design | app#view
relation forkable: app#view
permission view = viewer + editor + owner
permission edit = editor + owner
permission delete = owner + editor
permission share_access = owner + editor
permission fork = forkable
}
/**
* Embed Context Caveats
*
* Following caveats check embed_context to define relations of embed tokens.
* Embed context should always be sent by the client when checking permissions.
* Other parameters are written with the relations, and are compared to parts of embed context.
*
* As embed_context is dynamically typed, we should type check every property we access, to avoid any runtime errors.
*/
/** Checks if the embed token is an admin of the workspace, using the "roles" property of the embed context */
caveat embed_workspace_admin(embed_context map<any>) {
has(embed_context.roles) &&
type(embed_context.roles) == list &&
embed_context.roles.exists(role, role == "ADMIN" || role == "SUPER_ADMIN")
}
/** Checks if the embed token is a viewer of the workspace, using the "workspace_id" property of the embed context */
caveat embed_workspace_viewer(embed_context map<any>, workspace_id string) {
has(embed_context.workspace_id) &&
embed_context.workspace_id == workspace_id
}
/**
* Checks if the embed token is an owner of the app, using the "privileges" map of the embed context
* Accepted value is "own"
*/
caveat embed_app_owner(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] == "own" ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege == "own"))
)
}
/**
* Checks if the embed token is an editor of the app, using the "privileges" map of the embed context
* Accepted value is either "contribute" or "edit"
*/
caveat embed_app_editor(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] in ["contribute", "edit"] ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege in ["contribute", "edit"]))
)
}
/**
* Checks if the embed token is a data-editor of the app, using the "privileges" map of the embed context
* Accepted value is either "contribute-data" or "edit-data"
*/
caveat embed_app_data_editor(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] in ["contribute-data", "edit-data"] ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege in ["contribute-data", "edit-data"]))
)
}
/**
* Checks if the embed token is a design-editor of the app, using the "privileges" map of the embed context
* Accepted value is either "contribute-design" or "edit-design"
*/
caveat embed_app_design_editor(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] in ["contribute-design", "edit-design"] ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege in ["contribute-design", "edit-design"]))
)
}
/**
* Checks if the embed token is a validator of the app, using the "privileges" map of the embed context
* Accepted value is "validate"
*/
caveat embed_app_validator(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] == "validate" ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege == "validate"))
)
}
/**
* Checks if the embed token is a business-specialist of the app, using the "privileges" map of the embed context
* Accepted value is either "business-specialist" or "enrich"
*/
caveat embed_app_business_specialist(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
embed_context.privileges[app_id] in ["business-specialist", "enrich"] ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege in ["business-specialist", "enrich"]))
)
}
/**
* Checks if the embed token is a viewer of the app, using the "privileges" map of the embed context
* Accepted value is "view"
*/
caveat embed_app_viewer(app_id string, embed_context map<any>) {
has(embed_context.privileges) &&
type(embed_context.privileges) == map &&
embed_context.privileges.exists(key, key == app_id) &&
(
(type(embed_context.privileges[app_id]) == string && embed_context.privileges[app_id] == "view") ||
(type(embed_context.privileges[app_id]) == list && embed_context.privileges[app_id].exists(privilege, privilege == "view"))
)
}
## %%% END SPICEDB_SCHEMA %%%
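## As an illustration (resource and subject IDs are hypothetical), a caveated permission from the
## schema above can be checked with `zed` by passing the embed context, for instance:
##   zed permission check app:app-1 view embed_token:tok-1 \
##     --caveat-context '{"embed_context": {"privileges": {"app-1": "view"}}}'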
## @param spicedb.tls.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager)
## @param spicedb.tls.autoGenerated.certManager.existingIssuer The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine)
## @param spicedb.tls.ca CA certificate for TLS.
## @param spicedb.tls.cert TLS certificate for SpiceDB.
## @param spicedb.tls.key TLS key for SpiceDB.
## @param spicedb.tls.existingSecret The name of an existing Secret containing the SpiceDB certificates for TLS
##
tls:
enabled: true # Required
autoGenerated:
enabled: true
engine: helm
certManager:
existingIssuer: ''
existingIssuerKind: ''
keySize: 2048
keyAlgorithm: RSA
duration: 2160h
renewBefore: 360h
ca: ''
cert: ''
key: ''
existingSecret: ''
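## For example, assuming the conventional `ca.crt`/`tls.crt`/`tls.key` secret keys (check the
## chart templates for the exact key names), such a secret could be created with:
##   kubectl create secret generic spicedb-tls \
##     --from-file=ca.crt=./ca.pem --from-file=tls.crt=./cert.pem --from-file=tls.key=./key.pem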
image:
registry: ghcr.io
repository: authzed/spicedb
# renovate: image=ghcr.io/authzed/spicedb
tag: 'v1.44.4'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
## @param spicedb.replicaCount Number of spicedb replicas to deploy
replicaCount: 1
containerPorts:
grpc: 50051
metrics: 9090
dispatch: 50053
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## spicedb resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 50m
# memory: 128Mi
# limits:
# memory: 256Mi
podSecurityContext:
enabled: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
## Configure the migration container.
migration:
enabled: true
extraEnvVars:
- name: PG_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
extraVolumeMounts: []
## Configure the schema sync sidecar container
schemaSync:
enabled: true
image:
registry: ghcr.io
repository: authzed/zed
# renovate: image=ghcr.io/authzed/zed
tag: 'v0.30.2-debug'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
resourcesPreset: 'none'
resources: {}
extraVolumeMounts: []
## @param spicedb.existingBootstrapConfigmap The name of an existing ConfigMap with the bootstrap configuration
existingBootstrapConfigmap: ''
command: []
args: []
automountServiceAccountToken: true
hostAliases: []
deploymentAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param spicedb.podAffinityPreset Pod affinity preset. Ignored if `spicedb.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param spicedb.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `spicedb.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param spicedb.nodeAffinityPreset.type Node affinity preset type. Ignored if `spicedb.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param spicedb.nodeAffinityPreset.key Node label key to match. Ignored if `spicedb.affinity` is set
key: ''
## @param spicedb.nodeAffinityPreset.values Node label values to match. Ignored if `spicedb.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param spicedb.affinity Affinity for spicedb pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `spicedb.podAffinityPreset`, `spicedb.podAntiAffinityPreset`, and `spicedb.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
## @param spicedb.extraEnvVars Array with extra environment variables to add to spicedb containers
extraEnvVars:
- name: PG_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes: []
extraVolumeMounts: []
sidecars: []
initContainers: []
pdb:
create: true
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
hpa:
enabled: false
minReplicas: ''
maxReplicas: ''
targetCPU: ''
targetMemory: ''
service:
type: ClusterIP
ports:
grpc: 50051
metrics: 9090
dispatch: 50053
nodePorts:
grpc: 0
metrics: 0
dispatch: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## Prometheus metrics
##
metrics:
enabled: true
## Prometheus Operator ServiceMonitor configuration
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.ServiceMonitor
##
serviceMonitor:
enabled: false
namespace: ''
annotations: {}
labels: {}
jobLabel: ''
honorLabels: false
interval: ''
scrapeTimeout: ''
metricRelabelings: []
relabelings: []
selector: {}
## @section Vault Parameters
## ref: https://github.com/bitnami/charts/blob/main/bitnami/vault/values.yaml
vault:
enabled: true
bootstrap:
enabled: true
logging:
enabled: true
image:
registry: docker.io
repository: library/busybox
# renovate: image=docker.io/library/busybox
tag: '1.37'
oauthapp:
enabled: true
pluginURL: https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v3.1.1/vault-plugin-secrets-oauthapp-v3.1.1-linux-amd64.tar.xz
checksum: c943b505b39b53e1f4cb07f2a3455b59eac523ebf600cb04813b9ad28a848b21
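## To check the downloaded archive against this checksum, you can for instance run:
##   curl -sL <pluginURL above> | sha256sum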
## Image used to install the oauthapp plugin
image:
registry: docker.io
repository: library/alpine
# renovate: image=docker.io/library/alpine
tag: '3.22'
issuerURL: '{{ include "toucan-stack.curity.runtime.baseURL" . }}/{{ .Values.global.tenantID }}/oauth/oauth-anonymous'
token: ''
## @param vault.oauthapp.existingSecret Name of an existing secret to use for Vault secrets.
## `vault.oauthapp.token` will be ignored and picked up from this secret.
## The value is evaluated as a template.
## The secret must contain the key `vault-token`.
##
existingSecret: ''
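## For example (the secret name is illustrative):
##   kubectl create secret generic vault-oauthapp \
##     --from-literal=vault-token=<token>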
injector:
enabled: false
# Feel free to enable it.
ui:
enabled: false
server:
replicaCount: 1
config: |
disable_mlock = true
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:{{ .Values.server.containerPorts.http }}"
cluster_address = "[::]:{{ .Values.server.containerPorts.internal }}"
{{- if .Values.server.metrics.enabled }}
# Enable unauthenticated metrics access (necessary for Prometheus Operator)
telemetry {
unauthenticated_metrics_access = "true"
}
{{- end }}
}
storage "postgresql" {
ha_enabled = "true"
table = "vault_kv_store"
ha_table = "vault_ha_locks"
}
plugin_directory = "/vault/plugins"
plugin_tmpdir = "/tmp"
service_registration "kubernetes" {}
resourcesPreset: 'none'
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
memory: 2Gi
# In a single-node setup, keep it disabled to allow restarts.
pdb:
create: false
persistence:
enabled: false
toucanEnvVars:
- name: ADMIN_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: '{{- include "toucan-stack.curity.oauth2.secretName" . -}}'
key: curity-toucan-admin-management-client-secret
- name: MICRO_SERVICE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: '{{- include "toucan-stack.curity.oauth2.secretName" . -}}'
key: curity-toucan-micro-service-client-secret
- name: TOUCAN_VAULT_TOKEN
valueFrom:
secretKeyRef:
name: '{{- include "toucan-stack.vault.oauthapp.secretName" . -}}'
key: vault-token
- name: PG_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
- name: VAULT_PG_CONNECTION_URL
value: 'postgresql://{{ include "toucan-stack.database.user" . }}:$(PG_PASSWORD)@{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ include "toucan-stack.database.port" . }}/{{ include "toucan-stack.vault.database.name" . }}'
lifecycleHooks:
postStart:
exec:
command:
- /bin/sh
- -c
- sleep 30 && bash /bootstrap/bootstrap.sh >> /logs/vault-init.log 2>&1
## @section Curity Server Parameters
curity:
enabled: true
## @param curity.config Configure Curity.
##
## The configuration is NOT exhaustive. Feel free to patch the ConfigMap to fit your needs.
config:
admin:
serviceRole: admin
ui:
enabled: true
httpMode: true
logging:
level: DEBUG
stdout: true
logs:
- curity-init
# - audit
# - request
# - cluster
# - confsvc
# - confsvc-internal
# - post-commit-scripts
runtime:
serviceRole: default
logging:
level: DEBUG
stdout: true
logs:
[]
# - audit
# - request
# - cluster
# - confsvc
# - confsvc-internal
# - post-commit-scripts
sso:
authenticators: []
# - id: oidc
# # Supported type: oidc
# type: oidc
# configurationURL: "https://sso.example.com/.well-known/openid-configuration"
# scopes: openid profile email
# clientID: curity
# clientSecret:
# secretName: 'a'
# secretKey: 'b'
## Permissions Provisioning is a Curity middleware which calls the layout service
## to provision user permissions.
permissionsProvisioning:
secretName: '{{ include "toucan-stack.layout.fullname" . }}'
secretKey: layout-user-provisioning-shared-secret
isAdmin: true
# list of strings (names of groups)
groups:
[]
# - group-1
# - group-2
# map of string (app name) -> string (permission)
# available permissions are 'view', 'edit'
appPermissions:
{}
# app-1: 'view'
# app-2: 'edit'
cluster:
## @param curity.config.cluster.keystore Keystore to use for Curity cluster communication.
## To generate one `docker run --rm -it curity.azurecr.io/curity/idsvr:10.0.1 genclust -c unused -e <encryptionKey>`:
## keystore: "v:S.Tz..."
##
keystore: ''
## @param curity.config.cluster.existingSecret Name of an existing secret to use for Curity cluster secrets.
## `curity.config.cluster.keystore` will be ignored and picked up from this secret.
## The value is evaluated as a template.
## The secret must contain the key `curity-cluster-keystore`.
##
existingSecret: ''
## @param curity.config.adminPassword Password for the admin user
## If left empty, a random password will be generated.
##
adminPassword: ''
## @param curity.config.encryptionKey Encryption key for the identity server
## The format is a 64 character hex string.
## If left empty, a random key will be generated.
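## To generate one: `openssl rand -hex 32`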
##
encryptionKey: ''
## @param curity.config.existingSecret Name of an existing secret to use for Curity secrets.
## `curity.config.adminPassword` and `curity.config.encryptionKey` will be ignored
## and picked up from this secret.
## The secret must contain the keys `curity-admin-password` and `curity-encryption-key`.
## The value is evaluated as a template.
##
existingSecret: ''
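## For example (the secret name is illustrative):
##   kubectl create secret generic curity-secrets \
##     --from-literal=curity-admin-password=<password> \
##     --from-literal=curity-encryption-key=$(openssl rand -hex 32)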
## @param curity.config.license [Required] License key for the identity server
license:
secretName: ''
secretKey: ''
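## For example (secret and key names are illustrative):
##   kubectl create secret generic curity-license --from-literal=license=<your-license-key>
## then set `secretName: curity-license` and `secretKey: license`.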
## @param curity.config.allowedOrigins List of origins that can contact the identity server.
allowedOrigins:
- '{{ include "toucan-stack.tucana.baseURL" . }}'
## @param curity.config.redirectURIs List of redirect URIs for the identity server to callback.
redirectURIs:
- '{{ include "toucan-stack.tucana.baseURL" . }}'
- '{{ include "toucan-stack.tucana.baseURL" . }}/oauth/callback'
## @param curity.config.additionalBaseURLs List of additional base URLs to access the identity server.
additionalBaseURLs: []
dataSource:
driver: org.postgresql.Driver
connectionString: jdbc:postgresql://{{ include "toucan-stack.database.host" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.postgresql.primary.service.ports.postgresql }}/curity
username: '{{ include "toucan-stack.database.user" . }}'
## @param curity.config.dataSource.password Secret reference (name and key) for the Curity dataSource password
##
password:
secretName: '{{ include "toucan-stack.database.secretName" . }}'
secretKey: '{{ include "toucan-stack.database.keyName" . }}'
crypto:
## @param curity.config.crypto.default Configure default signing and verification keys.
## @param curity.config.crypto.default.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager)
## @param curity.config.crypto.default.ca CA certificate for TLS.
## @param curity.config.crypto.default.cert TLS certificate for Curity.
## @param curity.config.crypto.default.key TLS key for Curity.
## @param curity.config.crypto.default.existingSecret The name of an existing Secret containing the Curity certificates for TLS
default:
autoGenerated:
enabled: true
engine: helm
certManager:
keySize: 2048
keyAlgorithm: RSA
duration: 87600h # 10y
renewBefore: 360h
cert: ''
key: ''
existingSecret: ''
## @param curity.config.crypto.embedPublicKey Public key used to verify the signature of the jwt assertion token.
## To generate one `openssl rsa -in embed-private-key.pem -pubout`:
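## (the matching private key can first be generated with `openssl genrsa -out embed-private-key.pem 2048`; the 2048-bit size is illustrative)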
## embedPublicKey: |-
## -----BEGIN PUBLIC KEY-----
## -----END PUBLIC KEY-----
##
embedPublicKey: ''
impersonatePublicKey: ''
## @param curity.config.oauth2 Configure Curity OAuth2.
##
## CAUTION: Unless you want to use Curity independently from the rest of the stack,
## you should leave this section empty and use the `global.oauth2` parameters.
oauth2:
clientSecrets:
gatewayToucan: ''
toucanAdminManagement: ''
toucanEmbed: ''
toucanImpersonateService: ''
toucanImpersonateServiceEmbed: ''
toucanInfraAdmin: ''
toucanLaputaWebsocket: ''
toucanMicroService: ''
existingSecret: ''
credentialPolicy:
## @param curity.config.credentialPolicy.dictionary Fetch a list of strings to use as the password dictionary blacklist.
## If you are air-gapped, either point `source` to a server hosting this file on your local network,
## or disable this feature and provide the file manually through a ConfigMap mounted at /opt/idsvr/etc/password-dictionary.txt using `extraVolumeMounts` and `extraVolumes`.
dictionary:
enabled: true
source: https://gitlab.com/kalilinux/packages/seclists/-/raw/82dcaf3812b5ed14846aadfac524c80e9821afbe/Passwords/Common-Credentials/10-million-password-list-top-1000000.txt
checksum: 1843265e3860a97f417b2236dfa332a0d93b38efef0fee1a0a291fdba5458478
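## To verify the downloaded file against this checksum: `curl -sL <source URL above> | sha256sum`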
## @param curity.existingConfigmap The name of an existing ConfigMap with the XML configuration.
## Overrides `curity.config`.
##
existingConfigmap: ''
## @param curity.existingClusterConfigmap The name of an existing ConfigMap with the XML cluster configuration.
## This is used to join the runtime nodes to the cluster. It is not recommended to override this.
##
existingClusterConfigmap: ''
images:
pullSecrets: []
curity:
registry: curity.azurecr.io
repository: curity/idsvr
# renovate: image=curity.azurecr.io/curity/idsvr
tag: '10.1.0'
digest: ''
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## @param curity.images.utils Utils image for logging and initialization.
utils:
registry: docker.io
repository: library/busybox
# renovate: image=docker.io/library/busybox
tag: '1.37'
digest: ''
pullPolicy: IfNotPresent
theme:
registry: registry.k8s.io
repository: git-sync/git-sync
# renovate: image=registry.k8s.io/git-sync/git-sync
tag: v4.4.2
digest: ''
pullPolicy: IfNotPresent
admin:
containerPorts:
http: 6789
peer: 6790
metrics: 4466
healthcheck: 4465
admin: 6749
extraContainerPorts:
[]
# - name: myservice
# containerPort: 9090
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## curity resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 2Gi
# limits:
# memory: 2.1Gi
podSecurityContext:
enabled: true
runAsUser: 10001
runAsGroup: 10000
fsGroup: 10000
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
statefulsetAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param curity.admin.podAffinityPreset Pod affinity preset. Ignored if `curity.admin.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param curity.admin.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `curity.admin.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param curity.admin.nodeAffinityPreset.type Node affinity preset type. Ignored if `curity.admin.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param curity.admin.nodeAffinityPreset.key Node label key to match. Ignored if `curity.admin.affinity` is set
key: ''
## @param curity.admin.nodeAffinityPreset.values Node label values to match. Ignored if `curity.admin.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param curity.admin.affinity Affinity for curity admin pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `curity.admin.podAffinityPreset`, `curity.admin.podAntiAffinityPreset`, and `curity.admin.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
podManagementPolicy: OrderedReady
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
## @param curity.admin.extraEnvVars Array with extra environment variables to add to curity containers
extraEnvVars:
- name: PG_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes: []
extraVolumeMounts: []
sidecars: []
initContainers: []
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
service:
type: ClusterIP
ports:
http: 6789
peer: 6790
metrics: 4466
healthcheck: 4465
admin: 6749
nodePorts:
http: 0
peer: 0
metrics: 0
healthcheck: 0
admin: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## curity admin ingress parameters
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
pathType: ImplementationSpecific
## @param curity.admin.ingress.apiVersion Force Ingress API version (automatically detected if not set)
apiVersion: ''
hostname: curity.admin.local
ingressClassName: ''
path: /admin
annotations: {}
## @param curity.admin.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.curity.admin.ingress.hostname }}`
## You can:
## - Use the `ingress.secrets` parameter to create this TLS secret
## - Rely on cert-manager to create it by setting the corresponding annotations
## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
tls: false
## @param curity.admin.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
selfSigned: false
## @param curity.admin.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
## e.g:
## extraHosts:
## - name: curity.admin.local
## path: /
extraHosts: []
## @param curity.admin.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
## e.g:
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
extraPaths: []
## @param curity.admin.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## e.g:
## extraTls:
## - hosts:
## - curity.admin.local
## secretName: curity.admin.local-tls
extraTls: []
## @param curity.admin.ingress.secrets Custom TLS certificates as secrets
## NOTE: 'key' and 'certificate' are expected in PEM format
## NOTE: 'name' should line up with a 'secretName' set further up
## e.g:
## secrets:
## - name: curity.admin.local-tls
## key: |-
## -----BEGIN RSA PRIVATE KEY-----
## ...
## -----END RSA PRIVATE KEY-----
## certificate: |-
## -----BEGIN CERTIFICATE-----
## ...
## -----END CERTIFICATE-----
secrets: []
## @param curity.admin.ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
extraRules: []
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
persistence:
enabled: true
mountPath: /opt/idsvr/var/cdb
subPath: ''
## @param curity.admin.persistence.storageClass Storage class of backing PVC
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
storageClass: ''
annotations: {}
accessModes:
- ReadWriteOnce
size: 8Gi
existingClaim: ''
selector: {}
dataSource: {}
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## Prometheus metrics
metrics:
enabled: true
## Prometheus Operator ServiceMonitor configuration
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.ServiceMonitor
serviceMonitor:
enabled: false
namespace: ''
annotations: {}
labels: {}
jobLabel: ''
honorLabels: false
interval: ''
scrapeTimeout: ''
metricRelabelings: []
relabelings: []
selector: {}
runtime:
containerPorts:
http: 8443
metrics: 4466
healthcheck: 4465
extraContainerPorts:
[]
# - name: myservice
# containerPort: 9090
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## curity resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 500m
# memory: 2Gi
# limits:
# memory: 2.5Gi
podSecurityContext:
enabled: true
runAsUser: 10001
runAsGroup: 10000
fsGroup: 10000
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
statefulsetAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param curity.runtime.podAffinityPreset Pod affinity preset. Ignored if `curity.runtime.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param curity.runtime.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `curity.runtime.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param curity.runtime.nodeAffinityPreset.type Node affinity preset type. Ignored if `curity.runtime.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param curity.runtime.nodeAffinityPreset.key Node label key to match. Ignored if `curity.runtime.affinity` is set
key: ''
## @param curity.runtime.nodeAffinityPreset.values Node label values to match. Ignored if `curity.runtime.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param curity.runtime.affinity Affinity for curity runtime pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `curity.runtime.podAffinityPreset`, `curity.runtime.podAntiAffinityPreset`, and `curity.runtime.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
## @param curity.runtime.extraEnvVars Array with extra environment variables to add to curity containers
extraEnvVars:
- name: PG_PASSWORD
valueFrom:
secretKeyRef:
name: '{{ include "toucan-stack.database.secretName" . }}'
key: '{{ include "toucan-stack.database.keyName" . }}'
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes: []
extraVolumeMounts: []
sidecars: []
initContainers: []
pdb:
create: true
minAvailable: ''
maxUnavailable: ''
## Autoscaling configuration
## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
hpa:
enabled: false
minReplicas: ''
maxReplicas: ''
targetCPU: ''
targetMemory: ''
service:
type: ClusterIP
ports:
http: 8443
metrics: 4466
healthcheck: 4465
nodePorts:
http: 0
metrics: 0
healthcheck: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## curity runtime ingress parameters
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
## This must be enabled to access the login page.
ingress:
enabled: true
pathType: ImplementationSpecific
apiVersion: ''
hostname: curity.runtime.local
ingressClassName: ''
path: /
annotations: {}
## @param curity.runtime.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.curity.runtime.ingress.hostname }}`
## You can:
## - Use the `ingress.secrets` parameter to create this TLS secret
## - Rely on cert-manager to create it by setting the corresponding annotations
## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
##
tls: false
## @param curity.runtime.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## @param curity.runtime.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
## e.g:
## extraHosts:
## - name: curity.runtime.local
## path: /
##
extraHosts: []
## @param curity.runtime.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
## e.g:
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param curity.runtime.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## e.g:
## extraTls:
## - hosts:
## - curity.runtime.local
## secretName: curity.runtime.local-tls
##
extraTls: []
## @param curity.runtime.ingress.secrets Custom TLS certificates as secrets
## NOTE: 'key' and 'certificate' are expected in PEM format
## NOTE: 'name' should line up with a 'secretName' set further up
## e.g:
## secrets:
## - name: curity.runtime.local-tls
## key: |-
## -----BEGIN RSA PRIVATE KEY-----
## ...
## -----END RSA PRIVATE KEY-----
## certificate: |-
## -----BEGIN CERTIFICATE-----
## ...
## -----END CERTIFICATE-----
##
secrets: []
## @param curity.runtime.ingress.extraRules Additional rules to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
extraRules: []
## ServiceAccount configuration
##
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## Prometheus metrics
##
metrics:
enabled: true
## Prometheus Operator ServiceMonitor configuration
## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.ServiceMonitor
##
serviceMonitor:
enabled: false
namespace: ''
annotations: {}
labels: {}
jobLabel: ''
honorLabels: false
interval: ''
scrapeTimeout: ''
metricRelabelings: []
relabelings: []
selector: {}
## @section Gotenberg Subchart Parameters
## ref: https://github.com/MaikuMori/helm-charts/blob/master/charts/gotenberg/values.yaml
gotenberg:
enabled: true
## @section PostgreSQL Subchart Parameters
## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
## It is used by the dataset, layout, spicedb, vault and curity services.
postgresql:
enabled: true
image:
debug: true
auth:
enablePostgresUser: true
architecture: standalone
primary:
initdb:
scriptsConfigMap: '{{ printf "%s-initdb" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}'
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 256Mi
# limits:
# memory: 2Gi
persistence:
enabled: true
existingClaim: ''
subPath: ''
storageClass: ''
size: 8Gi
## @section MongoDB Parameters
##
## It is used by the laputa service.
mongodb:
enabled: true
auth:
enabled: true
rootUser: admin
rootPassword: ''
user: app
password: ''
readonlyUser: app_readonly
readonlyPassword: ''
## @param mongodb.auth.existingSecret Existing secret with MongoDB(®) credentials (keys: `mongodb-root-password`, `mongodb-app-password`, `mongodb-app-readonly-password`)
## NOTE: When it's set, the previous parameters are ignored.
##
existingSecret: ''
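## For example (the secret name is illustrative):
##   kubectl create secret generic mongodb-credentials \
##     --from-literal=mongodb-root-password=<root-password> \
##     --from-literal=mongodb-app-password=<app-password> \
##     --from-literal=mongodb-app-readonly-password=<readonly-password>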
## @param mongodb.initdbScriptsConfigMap Override initdb scripts ConfigMap with custom initdb scripts.
initdbScriptsConfigMap: ''
## @param mongodb.configuration MongoDB(®) configuration file to be used for Primary and Secondary nodes
## For documentation of all options, see: http://docs.mongodb.org/manual/reference/configuration-options/
## Example:
## configuration: |-
## # where and how to store data.
## storage:
## dbPath: /bitnami/mongodb/data/db
## journal:
## enabled: true
## directoryPerDB: false
## # where to write logging data
## systemLog:
## destination: file
## quiet: false
## logAppend: true
## logRotate: reopen
## path: /opt/bitnami/mongodb/logs/mongodb.log
## verbosity: 0
## # network interfaces
## net:
## port: 27017
## unixDomainSocket:
## enabled: true
## pathPrefix: /opt/bitnami/mongodb/tmp
## ipv6: false
## bindIpAll: true
## # replica set options
## #replication:
## #replSetName: replicaset
## #enableMajorityReadConcern: true
## # process management options
## processManagement:
## fork: false
## pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid
## # set parameter options
## setParameter:
## enableLocalhostAuthBypass: true
## # security options
## security:
## authorization: disabled
## #keyFile: /opt/bitnami/mongodb/conf/keyfile
##
configuration: ''
## @param mongodb.existingConfigmap Name of an existing ConfigMap with MongoDB(®) configuration.
##
existingConfigmap: ''
image:
registry: registry-1.docker.io
repository: library/mongo
# renovate: image=registry-1.docker.io/library/mongo
tag: '8.0.11'
digest: ''
pullPolicy: IfNotPresent
pullSecrets: []
containerPorts:
mongo: 27017
extraContainerPorts: []
livenessProbe:
enabled: true
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 10
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 10
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 30
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
## mongodb resource requests and limits
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
resourcesPreset: 'none'
resources:
{}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# memory: 256Mi
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 999
runAsUser: 999
runAsGroup: 999
containerSecurityContext:
enabled: true
seLinuxOptions: {}
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 999
runAsGroup: 999
capabilities:
drop: ['ALL']
seccompProfile:
type: 'RuntimeDefault'
command: []
args: []
automountServiceAccountToken: false
hostAliases: []
statefulsetAnnotations: {}
podLabels: {}
podAnnotations: {}
## @param mongodb.podAffinityPreset Pod affinity preset. Ignored if `mongodb.affinity` is set. Allowed values: `soft` or `hard`
podAffinityPreset: ''
## @param mongodb.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `mongodb.affinity` is set. Allowed values: `soft` or `hard`
podAntiAffinityPreset: soft
nodeAffinityPreset:
## @param mongodb.nodeAffinityPreset.type Node affinity preset type. Ignored if `mongodb.affinity` is set. Allowed values: `soft` or `hard`
type: ''
## @param mongodb.nodeAffinityPreset.key Node label key to match. Ignored if `mongodb.affinity` is set
key: ''
## @param mongodb.nodeAffinityPreset.values Node label values to match. Ignored if `mongodb.affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
values: []
## @param mongodb.affinity Affinity for mongodb pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: `mongodb.podAffinityPreset`, `mongodb.podAntiAffinityPreset`, and `mongodb.nodeAffinityPreset` will be ignored when it's set
affinity: {}
nodeSelector: {}
tolerations: []
updateStrategy:
type: RollingUpdate
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
lifecycleHooks: {}
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumes: []
extraVolumeMounts: []
sidecars: []
initContainers: []
pdb:
create: false
minAvailable: ''
maxUnavailable: ''
autoscaling:
vpa:
enabled: false
annotations: {}
controlledResources: []
maxAllowed: {}
minAllowed: {}
updatePolicy:
updateMode: Auto
service:
type: ClusterIP
ports:
mongo: 27017
nodePorts:
mongo: 0
clusterIP: ''
loadBalancerIP: ''
loadBalancerSourceRanges: []
externalTrafficPolicy: Cluster
annotations: {}
extraPorts: []
sessionAffinity: None
sessionAffinityConfig: {}
networkPolicy:
enabled: true
allowExternal: true
allowExternalEgress: true
addExternalClientAccess: true
extraIngress: []
extraEgress: []
ingressPodMatchLabels: {}
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
persistence:
enabled: true
mountPath: /data/db
subPath: ''
## @param mongodb.persistence.storageClass Storage class of backing PVC
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ''
annotations: {}
accessModes:
- ReadWriteOnce
size: 8Gi
existingClaim: ''
selector: {}
dataSource: {}
serviceAccount:
create: true
name: ''
annotations: {}
automountServiceAccountToken: true
## @section Redis Subchart Parameters
## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml
## It is used by the layout service for caching.
layout-redis:
enabled: true
nameOverride: 'layout-redis'
auth:
enabled: false
master:
persistence:
enabled: false
architecture: standalone
## @section Redis Subchart Parameters
## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml
## It is used by the laputa service for caching.
laputa-redis:
enabled: true
nameOverride: 'laputa-redis'
auth:
enabled: true
master:
persistence:
enabled: false
architecture: standalone
## @section Redis Subchart Parameters
## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml
## It is used by the impersonate service for storing sessions.
impersonate-redis:
enabled: true
nameOverride: 'impersonate-redis'
auth:
enabled: false
master:
persistence:
enabled: false
architecture: standalone
## Post-install job is automatically enabled if spicedb and curity are enabled.
postInstall:
images:
pullSecrets: []
createAdminAccount:
registry: docker.io
repository: alpine/curl
# renovate: image=docker.io/alpine/curl
tag: '8.14.1'
digest: ''
pullPolicy: IfNotPresent
createRelationship:
registry: ghcr.io
repository: authzed/zed
# renovate: image=ghcr.io/authzed/zed
tag: 'v0.30.2-debug'
digest: ''
pullPolicy: IfNotPresent
podAntiAffinityPreset: soft
nodeAffinityPreset:
type: ''
key: ''
values: []
affinity: {}
nodeSelector: {}
tolerations: []
priorityClassName: ''
topologySpreadConstraints: []
schedulerName: ''
terminationGracePeriodSeconds: ''
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
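## e.g. (the object below is purely illustrative):
## extraDeploy:
##   - apiVersion: v1
##     kind: ConfigMap
##     metadata:
##       name: my-extra-configmap
##     data:
##       hello: world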