charts/nstl/templates/statefulset.tpl (new file, 202 lines)
@@ -0,0 +1,202 @@
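{{- /* StatefulSet for the nstl (nscale Storage Layer) component. */ -}}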
{{- include "nplus.init" $ -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .component.fullName }}
  {{- if .this.utils.includeNamespace }}
  namespace: {{ .Release.Namespace }}
  {{- end }}
  labels:
    {{- include "nplus.instanceLabels" . | nindent 4 }}
  annotations:
    {{- include "nplus.argoWave" . | nindent 4 }}
    {{- include "nplus.annotations" . | nindent 4 }}
    {{- include "nplus.securityAnnotations" . | nindent 4 }}
spec:

  serviceName: {{ .component.fullName }}
  selector:
    matchLabels:
      {{- include "nplus.selectorLabels" . | nindent 6 }}
  replicas: {{ .Values.replicaCount }}
  podManagementPolicy: OrderedReady
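  # Pods are managed OrderedReady (started one at a time, in order); the update
  # strategy defaults to OnDelete, so pods only pick up a new revision after
  # they are deleted and recreated.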
  updateStrategy:
    type: {{ .Values.updateStrategy | default "OnDelete" }}
  minReadySeconds: 5

  template:
    metadata:
      labels:
        {{- include "nplus.templateLabels" . | nindent 8 }}
      annotations:
        {{- include "nplus.templateAnnotations" . | nindent 8 }}
        {{- include "nplus.securityAnnotations" . | nindent 8 }}
        kubectl.kubernetes.io/default-container: storage-layer
    spec:
      {{- include "nplus.imagePullSecrets" . | nindent 6 }}
      {{- include "nplus.podSecurityContext" . | nindent 6 }}
      {{- include "nplus.securityIllumioReadinessGates" . | nindent 6 }}
      {{- include "nplus.templateAffinity" . | nindent 6 }}
      {{- include "nplus.terminationGracePeriodSeconds" . | nindent 6 }}

      initContainers:
      {{- include "nplus.waitFor" . | nindent 6 }}
      {{- include "nplus.copyConfig" . | nindent 6 }}

      {{- if and ((.Values.mounts).disk).enabled ((.Values.mounts).disk).migration }}
      {{- if or ((.Values.mounts).disk).path ((.Values.mounts).disk).paths }}
      #TODO: This could also be made more dynamic.
      - name: migration
        image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
        imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
        {{- include "nplus.containerSecurityContext" . | nindent 8 }}
        {{- include "nplus.initResources" . | nindent 8 }}

        command: [ "/bin/sh", "-c" ]
        args:
        - |
          set -e
          if [ -z "$( ls -A '/mnt/arc_old' )" ]; then
            echo "No arc migration necessary"
          else
            if [ -f "/mnt/arc_old/.migrated" ]; then
              echo "Content of arc already migrated to new location on disk. .migrated file found on old location."
            else
              echo "Copying content of arc on data to new location on disk (without overwriting files)"
              cp -rnxv /mnt/arc_old/* /mnt/arc_new/
              echo "Writing .migrated file to prevent re-migration"
              echo "migrated" > /mnt/arc_old/.migrated
            fi
          fi
          if [ -z "$( ls -A '/mnt/ret_old' )" ]; then
            echo "No ret migration necessary"
          else
            if [ -f "/mnt/ret_old/.migrated" ]; then
              echo "Content of ret already migrated to new location on disk. .migrated file found on old location."
            else
              echo "Copying content of ret on data to new location on disk (without overwriting files)"
              cp -rnxv /mnt/ret_old/* /mnt/ret_new/
              echo "Writing .migrated file to prevent re-migration"
              echo "migrated" > /mnt/ret_old/.migrated
            fi
          fi
          echo "done."
        volumeMounts:
        - name: data
          subPath: arc
          mountPath: /mnt/arc_old
        - name: disk
          subPath: arc
          mountPath: /mnt/arc_new
        - name: data
          subPath: ret
          mountPath: /mnt/ret_old
        - name: disk
          subPath: ret
          mountPath: /mnt/ret_new
      {{- end }}{{/* disk mount definition */}}
      {{- end }}{{/* Migration and Disk enabled */}}

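      {{- /*
        If dvCheckPath is set, make sure a DA_HID.DAT file exists there,
        seeding it from the copy shipped in the image under
        /opt/ceyoniq/nscale-server/storage-layer/etc if it is missing.
      */}}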
      {{- if .this.dvCheckPath }}
      - name: copy-hid
        image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
        imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
        {{- include "nplus.containerSecurityContext" . | nindent 8 }}
        {{- include "nplus.initResources" . | nindent 8 }}

        command: [ "/bin/sh", "-c" ]
        args:
        - |
          set -e
          echo "Checking for DA_HID.DAT in {{ .this.dvCheckPath }}"
          if [ -f "{{ .this.dvCheckPath }}/DA_HID.DAT" ]; then
            echo "{{ .this.dvCheckPath }}/DA_HID.DAT found"
          else
            echo "{{ .this.dvCheckPath }}/DA_HID.DAT not found, trying to copy from etc"
            if [ -f "/opt/ceyoniq/nscale-server/storage-layer/etc/DA_HID.DAT" ]; then
              echo "/opt/ceyoniq/nscale-server/storage-layer/etc/DA_HID.DAT found"
              echo "copying it"
              cp -n /opt/ceyoniq/nscale-server/storage-layer/etc/DA_HID.DAT {{ .this.dvCheckPath }}/DA_HID.DAT
            else
              echo "/opt/ceyoniq/nscale-server/storage-layer/etc/DA_HID.DAT not found"
            fi
          fi
          echo "done."
        volumeMounts:
        {{- include "nplus.defaultMounts" . | nindent 8 }}
      {{- end }}

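      {{- /* Main containers: the log forwarder (via nplus.logForwarder) and the storage-layer server itself. */}}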
      containers:
      {{- include "nplus.logForwarder" . | nindent 6 }}
      - name: storage-layer
        image: {{ include "nplus.image" (dict "global" .Values.global "image" .Values.image) }}
        imagePullPolicy: {{ include "nplus.imagePullPolicy" .Values.image }}
        {{- include "nplus.containerSecurityContext" . | nindent 8 }}
        env:
        # - name: NSTL_INTERFACE_SSLINTERFACE
        #   value: "1"
        - name: LOG_APPENDER
          value: "Console"
        - name: NSTL_STORAGE-LAYER_LOGLEVEL
          value: "4"
        {{- if .this.dvCheckPath }}
        - name: NSTL_STORAGE-LAYER_DVCHECKPATH
          value: "{{ .this.dvCheckPath }}"
        {{- end }}
        {{- if .this.checkHighestDocId }}
        - name: NSTL_STORAGE-LAYER_CHECKHIGHESTDOCID
          value: "{{ .this.checkHighestDocId }}"
        {{- end }}
        {{- if .Values.serverID }}
        - name: NSTL_STORAGE-LAYER_SERVERID
          value: "{{ .Values.serverID }}"
        {{- end }}
        {{- if .this.accounting }}
        - name: NSTL_ACCOUNTING_ACTIVE
          value: "1"
        # This is the base path. In this directory, accounting will create a folder
        # named "accounting" if not present and publish the CSV files there.
        - name: NSTL_ACCOUNTING_BASEPATH
          value: "/opt/ceyoniq/nscale-server/storage-layer"
        {{- end }}

        {{- include "nplus.environment" . | nindent 8 }}
        {{- if .this.utils.maintenance }}
        {{- include "nplus.idle" . | nindent 8 }}
        {{- else }}
        startupProbe:
          tcpSocket:
            port: tcp
          initialDelaySeconds: 5
          failureThreshold: 30
          periodSeconds: 10
          timeoutSeconds: 5
        # -- Ceyoniq does not currently define an *official* livenessProbe, so we use
        # one that quickly checks the main socket on Layer 4.
        livenessProbe:
          tcpSocket:
            port: tcp
          # initialDelaySeconds: 10
          periodSeconds: 10
        readinessProbe:
          exec:
            command:
            - /opt/ceyoniq/nscale-server/storage-layer/bin/cstool
            - srv
            - -m1
            - -c
            - sock
          # initialDelaySeconds: 1
          failureThreshold: 1
        {{- end }}

        ports:
        {{- include "nplus.defaultContainerPorts" . | nindent 8 }}

        {{- include "nplus.resources" . | nindent 8 }}

        volumeMounts:
        {{- include "nplus.defaultMounts" . | nindent 8 }}

      volumes:
      {{- include "nplus.defaultVolumes" . | nindent 6 }}