samples/rms/README.md (Normal file, 59 lines)
@@ -0,0 +1,59 @@
# (virtual-) Remote Management Server

The *nplus RMS* creates a virtual IP address in your subnet. On this IP you will find an *nscale Remote Management Service* and a Layer 4 proxy that forwards the ports of the components to the corresponding pods.

As a result, the VIP looks as if it were a real server with a set of *nscale* components installed, so you can use the desktop Admin client to connect to it and configure it, including offline configuration.
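
Before pointing the Admin client at the VIP, you can probe it from any machine in the subnet. A minimal sketch, assuming the sample `externalIp` 10.17.1.49 from `server.yaml` below and a placeholder port (the forwarded component ports are not listed here):

```bash
# Probe the virtual IP. 10.17.1.49 is the sample externalIp from server.yaml;
# PORT is a placeholder, not a documented nscale port.
VIP=10.17.1.49
PORT=8080

ping -c 1 "$VIP"
nc -vz "$VIP" "$PORT"
```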

The offline configuration writes settings to the configuration files of the components. These files are injected into the Pods by *nplus*, making the legacy magic work again.
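
To verify the injected files, you can look inside a running pod. This is only a sketch: the pod name and file path are placeholders, since the injection mechanism and target paths are not documented here.

```bash
# List the pods of the release, then inspect a configuration file in one of them.
# Pod name and file path are illustrative placeholders.
kubectl -n my-namespace get pods
kubectl -n my-namespace exec my-nappl-pod-0 -- cat /path/to/component.conf
```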

The Shutdown, Startup, and Restart buttons in the Admin client will also work, as they are translated into Kubernetes commands by *nplus*.
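
The values below set `restartReplicas` per component, which suggests the replica count a component returns to after a restart. As a rough sketch of what such a translation could look like (the actual commands *nplus* issues are internal; namespace and workload names are placeholders):

```bash
# Hypothetical equivalent of the Admin client's Restart button.
# nplus performs this internally; the names below are placeholders.
kubectl -n my-namespace rollout restart statefulset/nstla

# Or, spelled out as an explicit scale-down / scale-up cycle:
kubectl -n my-namespace scale statefulset/nstla --replicas=0
kubectl -n my-namespace scale statefulset/nstla --replicas=1   # e.g. restartReplicas: 1
```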

However, there are some restrictions:

- In an HA scenario you need multiple virtual servers: nscale does not allow some components (such as nstl) to deploy more than one instance per server, and additional instances would also clash on the default ports. So it is better to have more RMS instances.
- Log files are not written, so the Admin client cannot fetch them; there is no log file viewing in the Admin client.

> Please note that this is a BETA feature, not released for production use.

This is a sample of the RMS in an HA environment with two virtual servers:

```yaml
components:
  rmsa: true
  rmsb: true

rmsa:
  ingress:
    domain: "server1.{{ .instance.group | default .Release.Name }}.lab.nplus.cloud"
  comps:
    nappl:
      enabled: true
      restartReplicas: 2
    nstl:
      enabled: true
      name: nstla
      restartReplicas: 1
      host: "{{ .component.prefix }}nstla.{{ .Release.Namespace }}.svc.cluster.local"
    rs:
      enabled: true
      restartReplicas: 2
    web:
      enabled: true
      restartReplicas: 2

rmsb:
  ingress:
    domain: "server2.{{ .instance.group | default .Release.Name }}.lab.nplus.cloud"
  comps:
    nappl:
      enabled: true
      name: nappljobs
      restartReplicas: 1
      replicaSetType: StatefulSet
      host: "{{ .component.prefix }}nappljobs.{{ .Release.Namespace }}.svc.cluster.local"
    nstl:
      name: nstlb
      enabled: true
      restartReplicas: 1
      host: "{{ .component.prefix }}nstlb.{{ .Release.Namespace }}.svc.cluster.local"
```

samples/rms/build.sh (Executable file, 66 lines)
@@ -0,0 +1,66 @@
#!/bin/bash
#
# This sample script builds the example as described. It is also used to build the test environment in our lab,
# so it should be well tested.
#

# Make sure the script fails immediately if anything goes wrong
set -e

# -- Environment variables:
# CHARTS:       The path to the source code
# DEST:         The path to the build destination
# SAMPLES:      The path to the samples folder (referenced by the helm calls below)
# SAMPLE:       The directory of the sample
# NAME:         The name of the sample, used as the .Release.Name
# KUBE_CONTEXT: The name of the kube context; selects the environment you build against.
#               You might have different environments such as lab, dev, qa, prod, demo, local, ...

# Check if we have the source code available
if [ ! -d "$CHARTS" ]; then
    echo "ERROR Building the rms samples: The Charts Sources folder was not found. Please make sure to run this script with the full source code available"
    exit 1
fi
if [ ! -d "$DEST" ]; then
    echo "ERROR Building the rms samples: DEST folder not found."
    exit 1
fi
if [ ! -d "$CHARTS/instance" ]; then
    echo "ERROR Building the rms samples: Chart Sources in $CHARTS/instance not found. Are you running this script as a subscriber?"
    exit 1
fi

# Set the variables
SAMPLE="administrator-server"
NAME="sample-$SAMPLE"

# Output what is happening
echo "Building $NAME"

# Create the manifest
mkdir -p "$DEST/instance"
helm template --debug \
    --values "$SAMPLES/application/empty.yaml" \
    --values "$SAMPLES/environment/$KUBE_CONTEXT.yaml" \
    --values "$SAMPLES/resources/$KUBE_CONTEXT.yaml" \
    --values "$SAMPLES/rms/server.yaml" \
    "$NAME" "$CHARTS/instance" > "$DEST/instance/$SAMPLE.yaml"

# Set the variables
SAMPLE="administrator-server-ha"
NAME="sample-$SAMPLE"

# Output what is happening
echo "Building $NAME"

# Create the manifest
mkdir -p "$DEST/instance"
helm template --debug \
    --values "$SAMPLES/ha/values.yaml" \
    --values "$SAMPLES/environment/$KUBE_CONTEXT.yaml" \
    --values "$SAMPLES/resources/$KUBE_CONTEXT.yaml" \
    --values "$SAMPLES/rms/server-ha.yaml" \
    --values "$SAMPLES/application/empty.yaml" \
    "$NAME" "$CHARTS/instance" > "$DEST/instance/$SAMPLE.yaml"
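
For reference, a hypothetical invocation of this script; all paths are placeholders for your local checkout:

```bash
# Example only: adjust the paths and context to your environment.
CHARTS=~/src/nplus-charts \
DEST=./build \
SAMPLES=~/src/nplus-charts/samples \
KUBE_CONTEXT=lab \
./samples/rms/build.sh
```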

samples/rms/server-ha.yaml (Normal file, 39 lines)
@@ -0,0 +1,39 @@
components:
  rmsa: true
  rmsb: true

# You could set an IP here, or take one from the pool.
# If you set an IP, it has to come from a pool.
rmsa:
  ingress:
    domain: "server1.{{ .instance.group | default .Release.Name }}.lab.nplus.cloud"
  comps:
    nappl:
      enabled: true
      restartReplicas: 2
    nstl:
      enabled: true
      name: nstla
      restartReplicas: 1
      host: "{{ .component.prefix }}nstla.{{ .Release.Namespace }}.svc.cluster.local"
    rs:
      enabled: true
      restartReplicas: 2
    web:
      enabled: true
      restartReplicas: 2

rmsb:
  ingress:
    domain: "server2.{{ .instance.group | default .Release.Name }}.lab.nplus.cloud"
  comps:
    nappl:
      enabled: true
      name: nappljobs
      restartReplicas: 1
      replicaSetType: StatefulSet
      host: "{{ .component.prefix }}nappljobs.{{ .Release.Namespace }}.svc.cluster.local"
    nstl:
      name: nstlb
      enabled: true
      restartReplicas: 1
      host: "{{ .component.prefix }}nstlb.{{ .Release.Namespace }}.svc.cluster.local"

samples/rms/server.yaml (Normal file, 19 lines)
@@ -0,0 +1,19 @@
components:
  # -- Enable the nplus Remote Management Server / rms
  rms: true

rms:
  ingress:
    domain: "admin.{{ .instance.group | default .Release.Name }}.lab.nplus.cloud"
    # -- This sets the external IP. It has to come from the Layer 3 Load Balancer pool, otherwise your
    # L3 Load Balancer will not be able to assign it.
    # If you leave this empty, a VIP will be assigned from the pool.
    externalIp: 10.17.1.49
  comps:
    nappl:
      enabled: true
    nstl:
      enabled: true
    rs:
      enabled: true
    web:
      enabled: true