Public Information

This commit is contained in:
2025-01-24 16:18:47 +01:00
commit 0bd2038c86
449 changed files with 108655 additions and 0 deletions

View File

@@ -0,0 +1,2 @@
{{- include "nplus.init" $ -}}
{{- include "nplus.component" . -}}

View File

@@ -0,0 +1,20 @@
{{- if .Values.autoScale }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .component.fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.annotations" . | nindent 4 }}
{{- include "nplus.argoWave" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: StatefulSet
name: {{ .component.fullName }}
minReplicas: {{ .Values.replicaCount }}
maxReplicas: {{ .Values.autoScale }}
targetCPUUtilizationPercentage: 80
{{- end }}
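
A minimal values sketch for this autoscaler (key names taken from the template above; the numbers are illustrative): .Values.autoScale doubles as the enable switch and the upper bound, while .Values.replicaCount becomes the lower bound.

    # values.yaml (sketch)
    replicaCount: 2   # used as minReplicas while the HPA is active
    autoScale: 6      # enables the HPA and sets maxReplicas; omit to disable autoscaling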

View File

@@ -0,0 +1,39 @@
{{- include "nplus.init" $ -}}
{{- if ( include "nplus.ingressEnabled" . ) }}
{{- include "nplus.ingress" (list . .component.fullName) | nindent 0 }}
- path: {{ .Values.ingress.contextPath }}
pathType: Prefix
backend:
service:
name: {{ .component.fullName }}
port:
name: {{ include "nplus.backendProtocol" . }}
{{- if .Values.ingress.includeDefaultPaths }}
- path: /index.html
pathType: Prefix
backend:
service:
name: {{ .component.fullName }}
port:
name: {{ include "nplus.backendProtocol" . }}
- path: /res
pathType: Prefix
backend:
service:
name: {{ .component.fullName }}
port:
name: {{ include "nplus.backendProtocol" . }}
- path: /engine.properties
pathType: Prefix
backend:
service:
name: {{ .component.fullName }}
port:
name: {{ include "nplus.backendProtocol" . }}
{{- end }}
{{- else }}
# kind: ingress
# Not generating any Ingress for {{ .component.fullName }} as
# Ingress = {{ .this.ingress }}
# Service = {{ .this.service }}
{{- end }}
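
A hedged values sketch for the paths rendered above (ingress.contextPath and ingress.includeDefaultPaths are read directly by this template; whether an Ingress is rendered at all is decided by the "nplus.ingressEnabled" helper, which per the comment branch above appears to consider .this.ingress and .this.service):

    # values.yaml (sketch)
    ingress:
      contextPath: /nscale        # illustrative; served as a Prefix path
      includeDefaultPaths: true   # also route /index.html, /res and /engine.properties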

View File

@@ -0,0 +1,210 @@
{{- include "nplus.init" $ -}}
{{- $sapIpRange := ( (.Values.snc).sapIpRange | default ((.this.security).cni).sapIpRange ) }}
{{- if ((.this.security).cni).createNetworkPolicy }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: {{ .component.fullName }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoWave" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
{{- include "nplus.securityAnnotations" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "nplus.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
- Egress
ingress:
{{- if ( include "nplus.ingressEnabled" . ) }}
{{- include "nplus.networkpolicy.allowFromIngress" . | nindent 2 }}
{{- end }}
- from:
# access from nappl core in the same instance to set up a cluster
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: core
ports:
{{- include "nplus.napplClusterPolicyPorts" . | nindent 4 }}
- from:
# access from application-layer-setup container in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: application
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
- from:
# access from application-layer-web in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: web
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
- from:
# access from pipeliner in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
# Allow both Core Mode and AC Mode.
# The Core Mode cluster is handled above.
nplus/component: pipeliner
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
- from:
# access from cmis-connector in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: cmis
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
- from:
# access from ilm-connector in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: ilm
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
- from:
# access from webdav-connector in the same instance
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: webdav
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
# - from:
# # access from xta-connector in the same instance
# - podSelector:
# matchLabels:
# app: xta-connector
# ports:
# - protocol: TCP
# port: {{ (.this.meta).ports.http }}
# - from:
# # access from process-automation-modeler in the same namespace
# - podSelector:
# matchLabels:
# app: process-automation-modeler
# ports:
# - protocol: TCP
# port: {{ (.this.meta).ports.http }}
- from:
# PAM Access
- namespaceSelector:
matchExpressions:
- {key: kubernetes.io/metadata.name, operator: In, values: [{{ .this.security.cni.pamNamespace }}]}
- podSelector:
matchLabels:
nplus/instance: {{ .this.security.cni.pamInstance }}
nplus/component: pam
{{- if ((.this.security).cni).excludeUnusedPorts }}
ports:
{{- include "nplus.defaultPolicyPorts" . | nindent 4 }}
{{- end }}
{{- include "nplus.networkpolicy.allowFromAdmin" . | nindent 2 }}
{{- include "nplus.networkpolicy.allowFromMon" . | nindent 2 }}
egress:
{{- if and (.Values.snc).enabled $sapIpRange }}
#
# Allow access to out-of-cluster SAP Systems for SNC
#
- to:
- ipBlock:
cidr: {{ $sapIpRange }}
{{- end }}
{{- with ((.this.security).cni).dbIpRange }}
#
# Allow access to out-of-cluster DB Systems
#
- to:
- ipBlock:
cidr: {{ . }}
{{- end }}
#
# allow database access in the same instance
#
- to:
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: database
#
# allow access to other cluster pods
#
- to:
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: core
#
# access to storage-layer in the same instance
#
- to:
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: nstl
#
# access to rendition server in the same instance
#
- to:
- podSelector:
matchLabels:
nplus/group: {{ .instance.group }}
nplus/type: rs
{{- if eq ((semver .component.version) | (semver "9.1.1200").Compare) 1 }}
#
# access to Kubernetes API for KubePing in older versions of nappl
#
# {{ .component.version }} is less than 9.1.1200 ({{ semver .component.version | (semver "9.1.1200").Compare }})
# so we add the old kubePing mechanics.
- ports:
- protocol: TCP
port: 16443
- protocol: TCP
port: 443
{{- end }}
{{- end }}
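
A values sketch for the switches this policy reads (assuming "nplus.init" resolves .this.security.cni from the component's values; all addresses and names below are illustrative):

    # values.yaml (sketch)
    security:
      cni:
        createNetworkPolicy: true   # render this NetworkPolicy at all
        excludeUnusedPorts: true    # restrict each ingress rule to "nplus.defaultPolicyPorts"
        sapIpRange: 10.20.0.0/24    # egress CIDR for out-of-cluster SAP systems (SNC);
                                    # snc.sapIpRange takes precedence if set
        dbIpRange: 10.30.0.0/24     # egress CIDR for out-of-cluster databases
        pamNamespace: pam-system    # namespace allowed for PAM access
        pamInstance: pam-prod       # nplus/instance label of the PAM pods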

View File

@@ -0,0 +1,2 @@
{{- include "nplus.init" $ -}}
{{- include "nplus.podDisruptionBudget" . -}}

View File

@@ -0,0 +1,2 @@
{{- include "nplus.init" $ -}}
{{- include "nplus.priorityClass" . }}

View File

@@ -0,0 +1,2 @@
{{- include "nplus.init" $ -}}
{{- include "nplus.pvc" . }}

View File

@@ -0,0 +1,60 @@
{{- include "nplus.init" $ -}}
{{- if eq ((semver .component.version) | (semver "9.1.1200").Compare) 1 -}}
{{- if (.Values.kubePing).name -}}
{{- if (.Values.kubePing).create -}}
# {{ .component.version }} is less than 9.1.1200 ({{ semver .component.version | (semver "9.1.1200").Compare }})
# so we add the old kubePing mechanics.
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ tpl .Values.kubePing.name . }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoSharedResource" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
---
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ tpl .Values.kubePing.name . }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoSharedResource" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ tpl .Values.kubePing.name . }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoSharedResource" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ tpl .Values.kubePing.name . }}
subjects:
- kind: ServiceAccount
name: {{ tpl .Values.kubePing.name . }}
{{- end }}
{{- end }}
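
The version gate here works as follows: (semver .component.version) is piped as the argument into (semver "9.1.1200").Compare, which returns 1 exactly when 9.1.1200 is greater than the component version, so this whole file only renders for versions older than 9.1.1200. A values sketch for the knobs it reads (the name is illustrative and is rendered through tpl, so it may contain template expressions):

    # values.yaml (sketch)
    kubePing:
      name: "{{ .Release.Name }}-kubeping"   # name for the ServiceAccount/Role/RoleBinding
      create: true                           # also create the ServiceAccount itself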

View File

@@ -0,0 +1,30 @@
apiVersion: v1
kind: Service
metadata:
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
name: {{ .component.fullName }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoWave" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
{{- include "nplus.securityAnnotations" . | nindent 4 }}
{{- include "nplus.serviceAnnotations" . | nindent 4 }}
spec:
type: ClusterIP
ports:
{{- include "nplus.defaultServicePorts" . | nindent 4 }}
selector:
{{- if eq .this.service.selector "component" }}
{{- include "nplus.selectorLabels" . | nindent 4 }}
{{- else if eq .this.service.selector "type" }}
{{- include "nplus.selectorLabelsNc" . | nindent 4 }}
{{- else }}
{{- fail (printf "Unknown Service Selector Type: %s - must be component or type" .this.service.selector) }}
{{- end }}
sessionAffinity: ClientIP
sessionAffinityConfig:
clientIP:
timeoutSeconds: 1800
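
A values sketch for the selector switch (only "component" and "type" are accepted; anything else fails the render via the fail call above). Note that sessionAffinity pins each client IP to one pod for 1800 seconds.

    # values.yaml (sketch)
    service:
      selector: component   # or "type" (uses the "nplus.selectorLabelsNc" label set instead)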

View File

@@ -0,0 +1,281 @@
{{- include "nplus.init" $ -}}
# Component: {{ .component.chartName }}
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .component.fullName }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoWave" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
{{- include "nplus.securityAnnotations" . | nindent 4 }}
spec:
serviceName: {{ .component.fullName }}
selector:
matchLabels:
{{- include "nplus.selectorLabels" . | nindent 6 }}
{{- if not .Values.autoScale }}
replicas: {{ .Values.replicaCount }}
{{- end }}
podManagementPolicy: OrderedReady
updateStrategy:
type: {{ .Values.updateStrategy | default "OnDelete" }}
minReadySeconds: 5
template:
metadata:
labels:
{{- include "nplus.templateLabels" . | nindent 8 }}
ceyoniq.com/application-layer-cluster-name: "{{ .Release.Name }}"
nplus/jobs: {{ .this.jobs | quote }}
annotations:
{{- include "nplus.templateAnnotations" . | nindent 8 }}
{{- include "nplus.securityAnnotations" . | nindent 8 }}
spec:
{{- include "nplus.priorityClassName" . | nindent 6 }}
{{- include "nplus.templateAffinity" . | nindent 6 }}
{{- include "nplus.imagePullSecrets" . | nindent 6 }}
{{- include "nplus.podSecurityContext" . | nindent 6 }}
{{- include "nplus.terminationGracePeriodSeconds" . | nindent 6 }}
{{- if eq ((semver .component.version) | (semver "9.1.1200").Compare) 1 }}
serviceAccountName: {{ tpl .Values.kubePing.name . }}
{{- else }}
automountServiceAccountToken: false
{{- end }}
{{- include "nplus.securityIllumioReadinessGates" . | nindent 6 }}
initContainers:
{{- include "nplus.waitFor" . | nindent 6 }}
{{- include "nplus.copyConfig" . | nindent 6 }}
# 1. RMS / Administrator do not work if service.conf does not exist.
#    The file may be empty, but the Administrator apparently checks that it is present.
# 2. The NAPPL cannot be started from the Administrator if the dbUrl is empty, so we
#    fill it in if it is set via ENV.
- name: make-rms-work
image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
{{- include "nplus.containerSecurityContext" . | nindent 8 }}
{{- include "nplus.initResources" . | nindent 8 }}
command: [ "/bin/sh", "-c" ]
args:
- |
echo "Touching service.conf"
touch /mnt/conf/service.conf
{{- if (.this.database).url }}
echo "dbUrl is set per ENV to {{ (.this.database).url }}"
echo "testing and patching templates/instance1.conf.template"
test -f /mnt/conf/templates/instance1.conf.template \
&& sed -i 's#^core.db.url=$#core.db.url={{ (.this.database).url }}#g' /mnt/conf/templates/instance1.conf.template \
|| echo "/mnt/conf/templates/instance1.conf.template not found. Make sure you run copyconf first"
echo "testing and patching instance1.conf"
test -f /mnt/conf/instance1.conf \
&& sed -i 's#^core.db.url=$#core.db.url={{ (.this.database).url }}#g' /mnt/conf/instance1.conf \
|| echo "/mnt/conf/instance1.conf not found. This is ok, if it is the first start."
echo "done."
{{- end }}
volumeMounts:
- name: conf
subPath: {{ .component.storagePath | quote }}
mountPath: /mnt/conf
containers:
- name: application-layer
image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
{{- include "nplus.containerSecurityContext" . | nindent 8 }}
env:
{{- if (.this.ingress).domain }}
# -- if you use SAML, it is important that the Application Layer knows its external
# URL so that it can redirect SAML requests correctly. So we provide it here:
- name: SERVER_BASE_URL
value: "https://{{ .this.ingress.domain }}"
{{- end }}
# -- SAP SNC Values
{{- if (.Values.snc).enabled }}
- name: SECUDIR
value: "/opt/snc"
- name: SNC_LIB
value: "/opt/snc/libsapcrypto.so"
{{- else }}
# there is no definition for .Values.snc, so no SNC features are rendered
{{- end }}
# -- Instance Configuration Values
- name: AL_APPENDER
value: "stdout"
{{- if (.this.database).url }}
# -- the database definition for the application layer is made within
# the values file or command-line settings. Another option would be
# to leave out the database definition completely; the server would
# then read the definition from the config file
- name: INSTANCE1_CORE_DB_DIALECT
value: "{{ required "if database.Url is defined, .dialect is required" (.this.database).dialect }}"
- name: INSTANCE1_CORE_DB_DRIVERCLASS
value: "{{ required "if database.Url is defined, .driverclass is required" (.this.database).driverclass }}"
- name: INSTANCE1_CORE_DB_URL
value: "{{ tpl (required "if database.Url is defined, .url is required" (.this.database).url ) . }}"
{{- if (.this.database).secret }}
# -- username and password are taken from a secret
- name: INSTANCE1_CORE_DB_USERNAME
valueFrom:
secretKeyRef:
name: {{ (.this.database).secret }}
key: account
- name: INSTANCE1_CORE_DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ (.this.database).secret }}
key: password
{{- else }}
# -- username and password are taken from
# the manifest. You should not do this in production
# but rather define a secret for it.
- name: INSTANCE1_CORE_DB_USERNAME
value: {{ required "if database.Url is defined, .username (or Secret) is required" (.this.database).account | quote}}
- name: INSTANCE1_CORE_DB_PASSWORD
value: {{ required "if database.Url is defined, .password (or Secret) is required" (.this.database).password | quote }}
{{- end }}
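# -- For reference, a sketch (an assumption, not part of this chart) of the Secret
#    that database.secret is expected to name; the keys must be "account" and
#    "password" to match the secretKeyRef entries above:
#
#      apiVersion: v1
#      kind: Secret
#      metadata:
#        name: nappl-db-credentials   # hypothetical name
#      type: Opaque
#      stringData:
#        account: nscale       # illustrative
#        password: change-me   # illustrative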
- name: INSTANCE1_CORE_DB_PASSWORD_ENCODED
value: {{ required "if database.Url is defined, .passwordEncoded is required to be true or false" (.this.database).passwordEncoded | quote }}
- name: INSTANCE1_CORE_DB_PASSWORD_ENCODED_STRONG
value: "false"
- name: INSTANCE1_CORE_DB_SCHEMA
value: {{ required "if database.Url is defined, .schema is required" (.this.database).schema | quote }}
{{- else }}
# there is no database definition here, so the database definition is left to be
# defined at some other place (ENV variables in the values file or the NAPPL config file)
{{- end }}
#
# nappl Cluster-Settings. Should be the same for all potential cluster nodes,
# including nappljobs and pipeliner core mode.
#
{{- if lt ((semver .component.version) | (semver "9.1.1200").Compare) 1 }}
# Starting Version 9.1, we need to switch the cluster communication to
# the new jgroups
- name: INSTANCE1_CLUSTER_CORE_STACKTYPE
value: "KUBERNETES"
- name: INSTANCE1_JGROUPS_DNS_QUERY
value: "{{ .component.prefix }}nappl-cluster"
{{- else }}
# Not generating INSTANCE1_CLUSTER_CORE_STACKTYPE into the manifest, as this
# component version ({{ .component.version }}) is older than 9.1.1200, when the
# new jGroups stack was introduced.
{{- end }}
- name: INSTANCE1_CLUSTER_CORE_CLUSTER_ID
value: "{{ .Release.Namespace }}_{{ .Release.Name }}"
- name: INSTANCE1_CLUSTER_CORE_NAME
value: "{{ .Release.Namespace }}_{{ .Release.Name }}"
- name: INSTANCE1_CLUSTER_CORE_DESCRIPTION
value: "{{ .Release.Name }} Cluster in {{ .Release.Namespace }} namespace"
# -- Jobs
{{- if .Values.jobs }}
# This cluster member is allowed to run jobs, so no changes to the default behaviour
{{- else }}
# These cluster members should not run jobs unless necessary
- name: INSTANCE1_CLUSTER_CORE_JOB_COORDINATOR_PRIORITY
value: "0"
{{- end }}
# -- # Session Settings
{{- if .Values.disableSessionReplication }}
- name: INSTANCE1_CORE_CLUSTER_SESSION_REPLICATION_DISABLE
value: {{ .Values.disableSessionReplication | quote }}
{{- end }}
{{- if .Values.sessionCacheStorageType }}
- name: SESSION_CACHE_STORAGE_TYPE
value: {{ .Values.sessionCacheStorageType | quote }}
{{- end }}
# TODO: Should this be set here or left out?
#
# The nscalealinst1.conf documentation says:
# Fulltext index mirror localcache, (on|off) default off
# Instructs the server to write the fulltext index on the local
# file system if set to on.
# It is recommended to set index.mirror.localcache to on when activating
# fulltext for repository or workflow, because this will
# increase the fulltext search performance significantly (see public documentation
# to get information about needed harddisk space).
# The pipeliner has to set this parameter to off.
#
# - name: INSTANCE1_CORE_FULLTEXT_INDEX_MIRROR_LOCALCACHE
# value: "off"
- name: METRICS_ALLOW_REMOTE_REQUESTS
value: "true"
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- include "nplus.appDynamicsEnv" . | nindent 10 }}
{{- include "nplus.environment" . | nindent 8 }}
{{- if .this.utils.maintenance }}
{{- include "nplus.idle" . | nindent 8 }}
{{- else }}
startupProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
initialDelaySeconds: 30
failureThreshold: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
livenessProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 10
{{- end }}
ports:
{{- include "nplus.defaultContainerPorts" . | nindent 8 }}
{{- include "nplus.resources" . | nindent 8 }}
volumeMounts:
{{- include "nplus.defaultMounts" . | nindent 8 }}
{{- if (.Values.snc).enabled }}
- name: conf
subPath: "pool/snc"
mountPath: "/opt/snc"
- name: conf
subPath: "pool/snc/libsapcrypto.so"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/libsapcrypto.so"
- name: conf
subPath: "pool/snc/sapjco3.jar"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/sapjco3.jar"
- name: conf
subPath: "pool/snc/libsapjco3.so"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/libsapjco3.so"
{{- end }}
volumes:
{{- include "nplus.defaultVolumes" . | nindent 6 }}