# nplus/charts/instance/values.yaml
# yaml-language-server: $schema=values.schema.json
components:
# -- enable a consumer *nscale Application Layer* component in this instance
nappl: true
# -- enable a dedicated jobs *nscale Application Layer* component in this instance
# please also make sure to set the *jobs* setting
nappljobs: false
# -- enable a *nscale Web* component in this instance
web: true
# -- enable a *nscale Monitoring Console* component in this instance
mon: false
# -- enable a *nscale Rendition Server* component in this instance
rs: true
# -- enable a *nscale ILM Connector* component in this instance
ilm: false
# -- enable a *nscale ERP Proxy Connector* component in this instance
erpproxy: false
# -- enable a *nscale ERP CMIS Connector* component in this instance
erpcmis: false
# -- enable a *nscale CMIS Connector* component in this instance
cmis: false
# -- enable an internal *Postgres Database* in this instance
database: true
# -- enable a *nscale Server Storage Layer* component in this instance
# If you are in a **High Availability** scenario, disable this and enable the dedicated
# nstla/nstlb nodes instead (see the commented example after this components section)
nstl: true
# -- enable an additional *nscale Server Storage Layer* node in this instance
# within a **High Availability** scenario.
nstla: false
# -- enable an additional *nscale Server Storage Layer* node in this instance
# within a **High Availability** scenario.
nstlb: false
# -- enable an additional *nscale Server Storage Layer* node in this instance
# within a **High Availability** scenario.
nstlc: false
# -- enable an additional *nscale Server Storage Layer* node in this instance
# within a **High Availability** scenario.
nstld: false
# -- enable *nscale Pipeliner* component in this instance
pipeliner: false
# -- deploy any solution using GBA, Standard Apps or shell copy with this generic deployment
# chart
application: false
# -- download, deploy and run any git asset or script prior to installation of the components
prepper: false
# -- enable a *nscale WebDAV Connector* component in this instance
webdav: false
# -- enable a *nscale Administrator Web* component in this instance
administrator: false
# -- enable a *nplus Remote Management Server* component in this instance
# If you are in a **High Availability** scenario, disable this
rms: false
# -- enable an additional *nplus Remote Management Server* in this instance
# within a **High Availability** scenario.
rmsa: false
# -- enable an additional *nplus Remote Management Server* in this instance
# within a **High Availability** scenario.
rmsb: false
# -- enable a *nscale Process Automation Modeler* component in this instance
pam: false
# -- enable a *nscale Sharepoint Connector* component in this instance
sharepoint: false
# -- enable an additional *nscale Sharepoint Connector* component in this instance for
# another set of configuration parameters
sharepointa: false
# -- enable an additional *nscale Sharepoint Connector* component in this instance for
# another set of configuration parameters
sharepointb: false
# -- enable an additional *nscale Sharepoint Connector* component in this instance for
# another set of configuration parameters
sharepointc: false
# -- enable an additional *nscale Sharepoint Connector* component in this instance for
# another set of configuration parameters
sharepointd: false
# -- This section is for the *Single-Instance-Mode*, in which all environment components are integrated
# into the instance
sim:
# -- This is for *Single-Instance-Mode* **only**. Read the docs before enabling this.
# DAV gives you WebDAV access to your conf and ptemp volumes
dav: false
# -- This is for *Single-Instance-Mode* **only**. Read the docs before enabling this.
# the backend component holds the common storages / PVCs for conf and ptemp,
# among other common environmental resources
backend: false
# -- This is for *Single-Instance-Mode* **only**. Read the docs before enabling this.
# The Operator will let you query the Custom Resources for nscale, e.g.
# `kubectl get nscale`
operator: false
# -- This is for *Single-Instance-Mode* **only**. Read the docs before enabling this.
# the toolbox has a git client installed and is suitable for pulling, pushing and copying
# assets into the pool: fonts, certificates, snippets and configuration files
toolbox: false
#TODO: remove
dmsapi: false
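# Example (illustrative only, not a chart default): a minimal values override for the
# **High Availability** scenario referenced above. This is a sketch - it disables the
# single storage layer and RMS components and enables dedicated HA nodes instead:
#
#   components:
#     nstl: false
#     nstla: true
#     nstlb: true
#     rms: false
#     rmsa: true
#     rmsb: true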
meta:
# -- sets tenant information to enable per-use invoicing in a cloud environment
tenant:
# -- sets provider (partner, reseller) information to enable per-use invoicing in a cloud environment
provider:
global:
telemetry:
# -- if you use OpenTelemetry as a telemetry collector, you can enable it here.
# This adds annotations to some known pods so the injector can place
# agents inside the pods for telemetry collection.
# This often goes along with the `language` setting in the meta section to tell the
# telemetry collector which agent to inject.
openTelemetry:
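# A minimal sketch (assuming a boolean is accepted here; the chart leaves the value unset):
#
#   global:
#     telemetry:
#       openTelemetry: true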
security:
# -- enables zero trust on the instance.
# When enabled, no unencrypted http connection is allowed.
# This will remove all http ports from pods, services, network policies and ingress rules
zeroTrust:
cni:
# -- if defined, creates a default NetworkPolicy to handle
# ingress traffic to the instance.
# Possible values: deny, allow, none
defaultIngressPolicy:
# -- if defined, creates a default NetworkPolicy to handle
# egress traffic from the instance.
# Possible values: deny, allow, none
defaultEgressPolicy:
# -- creates NetworkPolicies for each
# component.
createNetworkPolicy:
# -- sets the namespace from which administration is allowed
administratorNamespace: "{{ .Release.Namespace }}"
# -- sets the instance from which administration is allowed
administratorInstance: "{{ .this.instance.name }}"
# -- sets the namespace from which monitoring is allowed
monitoringNamespace: "{{ .Release.Namespace }}"
# -- sets the instance from which monitoring is allowed
monitoringInstance: "{{ .this.instance.name }}"
# -- sets the namespace from which Process Automation Modeling is allowed
pamNamespace: "{{ .Release.Namespace }}"
# -- sets the instance from which Process Automation Modeling is allowed
pamInstance: "{{ .this.instance.name }}"
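# Example (illustrative only): a hardened override that combines zero trust with the nplus
# network policies. The boolean values are assumptions where the defaults above are unset;
# the policy values come from the documented set (deny, allow, none):
#
#   global:
#     security:
#       zeroTrust: true
#       cni:
#         createNetworkPolicy: true
#         defaultIngressPolicy: deny
#         defaultEgressPolicy: allow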
instance:
# -- The name of the instance. If this name is identical to the namespace name, then
# the prefix will be dropped.
# By default, this is the .Release.Name
name: "{{ .Release.Name }}"
# -- The group of the instance. This is used for the NetworkPolicies: if you enable
# the nplus network policies, only Pods within one group are allowed to communicate.
# By default, this is the same as the instance name
group:
ingress:
# -- Sets the global domain to be used within the instance if the component does not define any domain.
# If this remains empty, no ingress is generated
# Example: `{{ .instance.group }}.lab.nplus.cloud`
domain:
# -- Sets the root for this instance, where incoming root traffic should be redirected to
appRoot: /nscale_web
# -- Sets the name of the TLS secret to be used for this ingress, which contains
# the private and public key. This secret is then either
# generated by cert-manager or self-signed by helm - or not created at all
# @default -- `{{ .this.ingress.domain }}-tls`
secret:
# -- sets the global ingress class for all components to use - if they do not define a specific
# one, for example if there are separate controllers for internal and external traffic
# @default -- `public`
class:
# -- optionally sets a whitelist of IP ranges (CIDR format, comma-separated)
# from which ingress is allowed. This is an nginx annotation, so it won't work with other
# ingress controllers
whitelist:
# -- Sets the name of the issuer used to create the TLS secret. It is very common to have it
# created by cert-manager; please see the documentation on how to create a cert-manager cluster
# issuer, for example. If no issuer is set, no certificate request will be generated
issuer:
# -- Specify the namespace in which the ingress controller runs. This sets the firewall rule / networkPolicy
# to allow traffic from this namespace to our pods. This may be a comma separated list
# @default -- `ingress, kube-system, ingress-nginx`
namespace:
# -- If you do not define an issuer to generate the TLS secret for you, you can still have a self-signed
# certificate generated by setting this to true. The default is true, so whether you have an issuer or
# not, you will always end up with a certificate. Set an empty issuer and createSelfSignedCertificate
# to false to have no certificate generated and use an external or existing secret. Then make sure the
# secret matches.
createSelfSignedCertificate: true
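# Example (illustrative only): a typical ingress setup with a cert-manager issuer.
# The domain and issuer name are placeholders for your own environment:
#
#   global:
#     ingress:
#       domain: "myinstance.lab.nplus.cloud"
#       class: public
#       issuer: letsencrypt-prod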
# -- Globally set the license secret name
license: nscale-license
waitImage:
# -- defines the nplus toolbox image to be used for the *wait* feature
repo: cr.nplus.cloud/subscription
# -- defines the nplus toolbox name to be used for the *wait* feature
name: toolbox2
# -- defines the nplus toolbox tag to be used for the *wait* feature
# @internal -- set by devOps pipeline, so do not modify
tag: 1.2.1300
# -- defines the nplus toolbox pull policy to be used for the *wait* feature
pullPolicy: IfNotPresent
logForwarderImage:
# -- defines the repo of the log forwarder image (FluentBit)
repo: cr.fluentbit.io/fluent
# -- defines the name of the log forwarder image (FluentBit)
name: fluent-bit
# -- defines the tag for the log forwarder (FluentBit)
# @internal -- set by devOps pipeline, so do not modify
tag: "2.0"
# -- defines the pull policy for the log forwarder (FluentBit)
pullPolicy: IfNotPresent
nappl:
# -- sets the *nscale Server Application Layer* host to be used. As this is a global option,
# it can be overridden at component level.
host: "{{ .component.prefix }}nappl.{{ .Release.Namespace }}"
# -- sets the *nscale Server Application Layer* port to be used. As this is a global option,
# it can be overridden at component level.
# if you switch to zero trust mode or change the nappl backend to https, you will want to change
# this port to 8443
port: 8080
# -- whether to use SSL for the advanced connector
ssl: false
# -- the instance of *nscale Server Application Layer* to be used
# @internal -- As this is deprecated for nscale 10, you should never modify this.
instance: "nscalealinst1"
# -- The technical account to log in with
account: admin
# -- The domain of the technical account
domain: nscale
# -- The password of the technical account (if not set by secret)
password: admin
# -- An optional secret that holds the credentials (the keys must be `account` and `password`)
secret:
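# Example (illustrative only): switching the global application layer connection to https,
# e.g. for zero trust, and reading credentials from a secret instead of plain values.
# The secret name is a placeholder; it must contain the keys `account` and `password`:
#
#   global:
#     nappl:
#       port: 8443
#       ssl: true
#       secret: nappl-credentials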
database:
# -- name of the nscale DB
name: "nscale"
# -- nscale DB server dialect
dialect: "PostgreSQL"
# -- nscale DB server driverclass
driverclass: "org.postgresql.Driver"
# -- The URL to the database
url: "jdbc:postgresql://{{ .component.prefix }}database:5432/{{ .this.database.name }}"
# -- DB schema name
schema: "public"
# -- DB account (if not using a secret)
account: "nscale"
# -- DB password (if not using a secret)
password: "nscale"
# -- whether the password is stored encrypted
passwordEncoded: "false"
# -- DB credential secret (account, password)
secret:
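# Example (illustrative only): pointing the instance at an external PostgreSQL server
# instead of the internal database component (set components.database to false).
# The hostname and secret name are placeholders:
#
#   global:
#     database:
#       url: "jdbc:postgresql://my-external-db.example.com:5432/nscale"
#       secret: nscale-db-credentials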
meta:
# -- Sets the nscale version of this deployment / instance. This is used by the operator to display
# the correct version e.g. in the Web UI.
# @internal -- this is set by the devOps pipeline, so do not modify
nscaleVersion: "9.3.1300"
nappl:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1300.2024121814
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: application-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}database.{{ .Release.Namespace }}.svc.cluster.local:5432 -timeout 600"
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: "{{ if .this.jobs }}4{{ else }}6{{ end }}"
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1300"
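# Example (illustrative only): the image coordinates of any component can be overridden,
# e.g. to pull through your own caching registry. The registry host is a placeholder;
# name and tag are managed by the devOps pipeline and are best left untouched:
#
#   nappl:
#     image:
#       repo: registry.example.com/mirror/nscale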
web:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1300.2024121620
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: application-layer-web
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 900"
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 7
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1300"
mon:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1000.2024092618
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: monitoring-console
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1000"
rs:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1301.2024121910
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: rendition-server
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 4
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1301"
nstl:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201.2024112518
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: storage-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
pipeliner:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1300.2024121815
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: pipeliner
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1300"
administrator:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: administrator
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 9
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
cmis:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1200.2024112508
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: cmis-connector
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1200"
ilm:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1000.2024091702
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: ilm-connector
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1000"
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
# -- For the database, we use Postgres 16.
# Ceyoniq uses docker.io/bitnami/postgresql:16
database:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: "16"
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: bitnami/postgresql
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "16"
application:
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
nstl:
# -- sets the DNS name of the *nscale Server Storage Layer* that should be configured
host: "{{ .component.prefix }}nstl.{{ .Release.Namespace }}"
rs:
# -- sets the DNS name of the *nscale Rendition Server* that should be configured
host: "{{ .component.prefix }}rs.{{ .Release.Namespace }}"
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 11
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1300"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1300.2024121814
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: application-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
nappljobs:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1300.2024121814
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: application-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 4
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1300"
webdav:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1000"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1000.2024091609
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: webdav-connector
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
nstla:
clusterService:
# -- When using multiple nstl instances with different configurations, you might still want to
# use a cluster service for HA access. This will generate one for you
# (see the commented example after this block).
enabled: true
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201.2024112518
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: storage-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
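# Example (illustrative only): if you run HA storage layer nodes but handle load
# balancing elsewhere, the generated cluster service can be switched off:
#
#   nstla:
#     clusterService:
#       enabled: false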
nstlb:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201.2024112518
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: storage-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
# -- rms is not a Ceyoniq component, but a part of nplus
rms:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 10
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "1.2.1200"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: admin-server
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: 1.2.1200
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: cr.nplus.cloud/subscription
rmsa:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 10
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "1.2.1200"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: admin-server
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: 1.2.1200
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: cr.nplus.cloud/subscription
rmsb:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 10
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "1.2.1200"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: admin-server
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: 1.2.1200
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: cr.nplus.cloud/subscription
nstlc:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201.2024112518
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: storage-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
nstld:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 3
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1201"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1201.2024112518
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: storage-layer
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
pam:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 9
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1200"
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1200.63696
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: process-automation-modeler
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
# -- For the SharePoint Connector, there is no entry in GitHub yet, so we hardcode it
# TODO: 9.3: Test again later whether there is a valid GitHub entry.
sharepoint:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1400"
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sharepoint-connector
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1400.2024073012
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
dmsapi:
meta:
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
sharepointa:
clusterService:
# -- When using multiple SharePoint Connectors with different configurations, you might still
# want to use a retrieval cluster for HA, so you can enable the clusterService and define the
# context path (see the commented example after this block).
enabled: false
# -- Sets the context path for the cluster ingress.
# Make sure the members are also listening on this path
contextPath: "/nscale_spc"
ingress:
# -- Defines the context path of this SharePoint instance, in case you have multiple instances.
# We do not want them to consume the same ingress path, because that would block the ingress
# from being created.
contextPath: "/nscale_spca"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sharepoint-connector
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1400.2024073012
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1400"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
sharepointb:
ingress:
# -- Defines the context path of this SharePoint instance, in case you have multiple instances.
# We do not want them to consume the same ingress path, because that would block the ingress
# from being created.
contextPath: "/nscale_spcb"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sharepoint-connector
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1400.2024073012
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1400"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
sharepointc:
ingress:
# -- Defines the context path of this SharePoint instance, in case you have multiple instances.
# We do not want them to consume the same ingress path, because that would block the ingress
# from being created.
contextPath: "/nscale_spcc"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sharepoint-connector
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1400.2024073012
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1400"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
sharepointd:
ingress:
# -- Defines the context path of this SharePoint instance, in case you have multiple instances.
# We do not want them to consume the same ingress path, because that would block the ingress
# from being created.
contextPath: "/nscale_spcd"
image:
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sharepoint-connector
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1400.2024073012
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1400"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
prepper:
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: 1.2.1300
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: toolbox2
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: cr.nplus.cloud/subscription
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "1.2.1300"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 2
backend:
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "1.2.1400-124"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 1
erpproxy:
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.3.1000"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.3.1000.2024092409
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: sap-proxy-connector
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/pre-release/nscale
erpcmis:
meta:
# -- This is the version of the component, used for display
# @internal -- set by devOps pipeline, so do not modify
componentVersion: "9.2.1000"
# -- Defines the ArgoCD wave in which this component should be installed.
# This setting only applies to scenarios where ArgoCD is used as the handler
wave: 8
# -- Defines what condition needs to be met before this component starts
waitFor:
- "-service {{ .component.prefix }}nappl.{{ .Release.Namespace }}.svc.cluster.local:{{ .this.nappl.port }} -timeout 1800"
image:
# -- defines the tag for this component
# @internal -- set by devOps pipeline, so do not modify
tag: ubi.9.2.1000.2024032720
# -- sets the name of the image to use for this component
# @internal -- set by devOps pipeline, so do not modify
name: erp-cmis-connector
# -- sets the repo from where to load the image. This can be overridden on environment or instance level
# in case you have your own repo for caching and security reasons
repo: ceyoniq.azurecr.io/release/nscale