nplus/charts/nappl/templates/statefulset.tpl
{{- include "nplus.init" $ -}}
# Component: {{ .component.chartName }}
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .component.fullName }}
{{- if .this.utils.includeNamespace }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "nplus.instanceLabels" . | nindent 4 }}
annotations:
{{- include "nplus.argoWave" . | nindent 4 }}
{{- include "nplus.annotations" . | nindent 4 }}
{{- include "nplus.securityAnnotations" . | nindent 4 }}
spec:
serviceName: {{ .component.fullName }}
selector:
matchLabels:
{{- include "nplus.selectorLabels" . | nindent 6 }}
{{- if not .Values.autoScale }}
replicas: {{ .Values.replicaCount }}
{{- end }}
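  # When autoScale is set, replicas is omitted on purpose so that an external
  # autoscaler (presumably an HPA targeting this StatefulSet) owns the replica
  # count and Helm upgrades do not reset it.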
podManagementPolicy: OrderedReady
updateStrategy:
type: {{ .Values.updateStrategy | default "OnDelete" }}
minReadySeconds: 5
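  # Note: with the OnDelete default, Kubernetes does not roll pods automatically
  # on spec changes; a pod only picks up a new revision once it is deleted
  # (manually or by an operator).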
template:
metadata:
labels:
{{- include "nplus.templateLabels" . | nindent 8 }}
ceyoniq.com/application-layer-cluster-name: "{{ .Release.Name }}"
nplus/jobs: {{ .this.jobs | quote }}
annotations:
{{- include "nplus.templateAnnotations" . | nindent 8 }}
{{- include "nplus.securityAnnotations" . | nindent 8 }}
spec:
{{- include "nplus.priorityClassName" . | nindent 6 }}
{{- include "nplus.templateAffinity" . | nindent 6 }}
{{- include "nplus.imagePullSecrets" . | nindent 6 }}
{{- include "nplus.podSecurityContext" . | nindent 6 }}
{{- include "nplus.terminationGracePeriodSeconds" . | nindent 6 }}
{{- if eq ((semver .component.version) | (semver "9.1.1200").Compare) 1 }}
serviceAccountName: {{ tpl .Values.kubePing.name . }}
{{- else }}
automountServiceAccountToken: false
{{- end }}
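      # Version gate above: (semver "9.1.1200").Compare returns 1 when 9.1.1200 is
      # greater than .component.version, so the kubePing service account (API access
      # for the old KUBE_PING discovery) is only set for versions below 9.1.1200;
      # newer versions use DNS discovery (see the JGroups settings below) and mount
      # no token at all.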
{{- include "nplus.securityIllumioReadinessGates" . | nindent 6 }}
initContainers:
{{- include "nplus.waitFor" . | nindent 6 }}
{{- include "nplus.copyConfig" . | nindent 6 }}
      # 1. RMS / Administrator do not work if the service.conf does not exist. The file
      #    may be empty, but the Administrator apparently checks for its presence.
      # 2. The NAPPL cannot be started from the Administrator if the dbUrl is empty,
      #    so we fill it in if it is set via ENV.
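      # Init containers run strictly in order, so copyConfig above has already
      # populated /mnt/conf by the time this container patches it.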
- name: make-rms-work
image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
{{- include "nplus.containerSecurityContext" . | nindent 8 }}
{{- include "nplus.initResources" . | nindent 8 }}
command: [ "/bin/sh", "-c" ]
args:
- |
echo "Touching service.conf"
touch /mnt/conf/service.conf
{{- if (.this.database).url }}
echo "dbUrl is set per ENV to {{ (.this.database).url }}"
echo "testing and patching templates/instance1.conf.template"
test -f /mnt/conf/templates/instance1.conf.template \
&& sed -i 's#^core.db.url=$#core.db.url={{ (.this.database).url }}#g' /mnt/conf/templates/instance1.conf.template \
|| echo "/mnt/conf/templates/instance1.conf.template not found. Make sure you run copyconf first"
echo "testing and patching instance1.conf"
test -f /mnt/conf/instance1.conf \
&& sed -i 's#^core.db.url=$#core.db.url={{ (.this.database).url }}#g' /mnt/conf/instance1.conf \
|| echo "/mnt/conf/instance1.conf not found. This is ok, if it is the first start."
echo "done."
{{- end }}
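        # Illustrative effect of the sed above (the URL is a placeholder):
        #   before: core.db.url=
        #   after:  core.db.url=jdbc:postgresql://db.example:5432/nscale
        # Only empty core.db.url lines match (the pattern is anchored with ^...$),
        # so an already configured URL is never overwritten.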
volumeMounts:
- name: conf
subPath: {{ .component.storagePath | quote }}
mountPath: /mnt/conf
containers:
- name: application-layer
image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
{{- include "nplus.containerSecurityContext" . | nindent 8 }}
env:
{{- if (.this.ingress).domain }}
          # -- if you use SAML, it is important that the Application Layer knows its
          # external URL so it can redirect SAML requests correctly. We provide it here:
- name: SERVER_BASE_URL
value: "https://{{ .this.ingress.domain }}"
{{- end }}
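          # Rendered example (domain is hypothetical): ingress.domain set to
          # "dms.example.com" yields SERVER_BASE_URL=https://dms.example.com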
# -- SAP SNC Values
{{- if (.Values.snc).enabled }}
- name: SECUDIR
value: "/opt/snc"
- name: SNC_LIB
value: "/opt/snc/libsapcrypto.so"
{{- else }}
          # there is no definition for .Values.snc, so no SNC features are rendered
{{- end }}
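          # SECUDIR and SNC_LIB follow the usual SAP SNC conventions: SECUDIR points
          # to the directory holding the security environment (PSE files), SNC_LIB to
          # the CommonCryptoLib shared object mounted further below. A minimal,
          # hypothetical values excerpt to enable this block:
          #   snc:
          #     enabled: true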
# -- Instance Configuration Values
- name: AL_APPENDER
value: "stdout"
{{- if (.this.database).url }}
          # -- the database definition for the Application Layer is made via the
          # values file or command line settings. Alternatively, the database
          # definition can be omitted completely; it is then read from the
          # config file instead.
          - name: INSTANCE1_CORE_DB_DIALECT
            value: "{{ required "if database.url is defined, .dialect is required" (.this.database).dialect }}"
          - name: INSTANCE1_CORE_DB_DRIVERCLASS
            value: "{{ required "if database.url is defined, .driverclass is required" (.this.database).driverclass }}"
          - name: INSTANCE1_CORE_DB_URL
            value: "{{ tpl (required "if database.url is defined, .url is required" (.this.database).url) . }}"
{{- if (.this.database).secret }}
# -- username and password are taken from a secret
- name: INSTANCE1_CORE_DB_USERNAME
valueFrom:
secretKeyRef:
name: {{ (.this.database).secret }}
key: account
- name: INSTANCE1_CORE_DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ (.this.database).secret }}
key: password
{{- else }}
          # -- username and password are taken from the manifest. You should not
          # do this in production; define a secret for it instead.
          - name: INSTANCE1_CORE_DB_USERNAME
            value: {{ required "if database.url is defined, .account (or a secret) is required" (.this.database).account | quote }}
          - name: INSTANCE1_CORE_DB_PASSWORD
            value: {{ required "if database.url is defined, .password (or a secret) is required" (.this.database).password | quote }}
{{- end }}
- name: INSTANCE1_CORE_DB_PASSWORD_ENCODED
value: {{ required "if database.Url is defined, .passwordEncoded is required to be true or false" (.this.database).passwordEncoded | quote }}
- name: INSTANCE1_CORE_DB_PASSWORD_ENCODED_STRONG
value: "false"
- name: INSTANCE1_CORE_DB_SCHEMA
value: {{ required "if database.Url is defined, .schema is required" (.this.database).schema | quote }}
{{- else }}
          # there is no definition for the database, so the database definition is
          # left to be provided elsewhere (ENV variables in the values file or the
          # NAPPL config file)
{{- end }}
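          # Hypothetical values excerpt for the database block above (the keys are
          # the ones consumed here, the values are placeholders; 'secret' must
          # provide the keys 'account' and 'password'):
          #   database:
          #     url: "jdbc:postgresql://db.example:5432/nscale"
          #     dialect: "POSTGRESQL"
          #     driverclass: "org.postgresql.Driver"
          #     schema: "nscale"
          #     passwordEncoded: "false"
          #     secret: "nscale-db-credentials"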
          #
          # NAPPL cluster settings. These should be the same for all potential
          # cluster nodes, including nappljobs and the pipeliner in core mode.
          #
{{- if lt ((semver .component.version) | (semver "9.1.1200").Compare) 1 }}
          # Starting with version 9.1, cluster communication has to be switched to
          # the new JGroups stack
- name: INSTANCE1_CLUSTER_CORE_STACKTYPE
value: "KUBERNETES"
- name: INSTANCE1_JGROUPS_DNS_QUERY
value: "{{ .component.prefix }}nappl-cluster"
{{- else }}
          # Not generating INSTANCE1_CLUSTER_CORE_STACKTYPE into the manifest, as this
          # component version is {{ .component.version }}, i.e. before 9.1.1200, where
          # the new JGroups stack was introduced.
{{- end }}
- name: INSTANCE1_CLUSTER_CORE_CLUSTER_ID
value: "{{ .Release.Namespace }}_{{ .Release.Name }}"
- name: INSTANCE1_CLUSTER_CORE_NAME
value: "{{ .Release.Namespace }}_{{ .Release.Name }}"
- name: INSTANCE1_CLUSTER_CORE_DESCRIPTION
value: "{{ .Release.Name }} Cluster in {{ .Release.Namespace }} namespace"
# -- Jobs
{{- if .Values.jobs }}
# This is a cluster member that is allowed to run jobs, so no changes to the default behaviour
{{- else }}
          # These cluster members should not run jobs unless absolutely necessary
- name: INSTANCE1_CLUSTER_CORE_JOB_COORDINATOR_PRIORITY
value: "0"
{{- end }}
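          # Priority "0" is the lowest coordinator priority: per the note above, such
          # members only pick up jobs when no regular job node is available.
          # Illustrative: a non-job node can be deployed with --set jobs=false.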
          # -- Session Settings
{{- if .Values.disableSessionReplication }}
- name: INSTANCE1_CORE_CLUSTER_SESSION_REPLICATION_DISABLE
value: {{ .Values.disableSessionReplication | quote }}
{{- end }}
{{- if .Values.sessionCacheStorageType }}
- name: SESSION_CACHE_STORAGE_TYPE
value: {{ .Values.sessionCacheStorageType | quote }}
{{- end }}
          # TODO: Should this go in here or be left out?
          #
          # The nscalealinst1.conf documentation says about this:
# Fulltext index mirror localcache, (on|off) default off
# Instructs the server to write the fulltext index on the local
# file system if set to on.
# It is recommended to set index.mirror.localcache to on when activating
# fulltext for repository or workflow, because this will
# increase the fulltext search performance significantly (see public documentation
# to get information about needed harddisk space).
# The pipeliner has to set this parameter to off.
#
          # - name: INSTANCE1_CORE_FULLTEXT_INDEX_MIRROR_LOCALCACHE
# value: "off"
- name: METRICS_ALLOW_REMOTE_REQUESTS
value: "true"
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- include "nplus.appDynamicsEnv" . | nindent 10 }}
{{- include "nplus.environment" . | nindent 8 }}
{{- if .this.utils.maintenance }}
{{- include "nplus.idle" . | nindent 8 }}
{{- else }}
startupProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
initialDelaySeconds: 30
failureThreshold: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
livenessProbe:
httpGet:
path: /jmx/status
port: {{ include "nplus.backendPort" . }}
scheme: {{ include "nplus.backendProtocol" . | upper }}
periodSeconds: 60
timeoutSeconds: 5
failureThreshold: 10
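        # Probe budget (standard Kubernetes arithmetic): startup gets ~30s delay plus
        # 30 x 10s = up to roughly 330s before the container is restarted; readiness
        # turns the pod unready after ~3 x 10s of failures; liveness tolerates about
        # 10 x 60s (~10 min) of failed /jmx/status checks before a restart.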
{{- end }}
ports:
{{- include "nplus.defaultContainerPorts" . | nindent 8 }}
{{- include "nplus.resources" . | nindent 8 }}
volumeMounts:
{{- include "nplus.defaultMounts" . | nindent 8 }}
{{- if (.Values.snc).enabled }}
- name: conf
subPath: "pool/snc"
mountPath: "/opt/snc"
- name: conf
subPath: "pool/snc/libsapcrypto.so"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/libsapcrypto.so"
- name: conf
subPath: "pool/snc/sapjco3.jar"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/sapjco3.jar"
- name: conf
subPath: "pool/snc/libsapjco3.so"
mountPath: "/opt/ceyoniq/nscale-server/application-layer/lib/libsapjco3.so"
{{- end }}
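        # The SNC mounts above expect libsapcrypto.so, sapjco3.jar and libsapjco3.so
        # to have been placed under pool/snc on the conf volume beforehand;
        # provisioning those SAP artifacts is outside the scope of this template.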
volumes:
{{- include "nplus.defaultVolumes" . | nindent 6 }}