Simple LocalEGA Helm setup
#!/usr/bin/env bash
source _script_source.sh
set -e
## True once the k3s service load-balancer pods for traefik exist.
## `wc -l` counts the kubectl header line too, so any output means pods exist;
## with no pods kubectl writes only to stderr and the count is 0.
has_traefik_pods() {
    L=$(kubectl get pods -n kube-system -lapp=svclb-traefik 2>/dev/null | wc -l)
    [ "$L" -gt 0 ]
}
## True once every non-completed pod in the cluster reports Ready.
## Despite its name, this inspects all namespaces, not just traefik;
## completed jobs (reason PodCompleted) are excluded from the count.
has_running_traefik_pods() {
    NUM=$(kubectl get -A pods -o json \
        | jq '.items[] | .status.conditions[] | select(.type == "Ready" and .reason != "PodCompleted" and .status != "True") | .status' \
        | wc -l)
    [ "$NUM" -eq 0 ]
}
start_k3d() {
    if ! grep -q "$K3DNAME" <(k3d ls); then
        k3d create \
            --name "$K3DNAME" \
            --workers 4 \
            --image rancher/k3s:v0.9.1 \
            --publish 8080:80 \
            --publish 8443:443
        #--volume "$PWD/config.toml.tmpl":"/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl" \
        echo "Waiting for cluster to create"
        sleep 10
        export KUBECONFIG="$(k3d get-kubeconfig --name="$K3DNAME")"
        until has_traefik_pods; do
            echo "Waiting for cluster to create traefik pods"
            sleep 10
        done
        until has_running_traefik_pods; do
            echo "Waiting for cluster to have ready traefik pods"
            sleep 10
        done
        ## Fix DNS
        ## Only needed if your local network blocks the google DNS servers
        kubectl apply -f <(kubectl -n kube-system get configmap coredns -o yaml | sed 's/\/etc\/resolv.conf/130.238.4.133 130.238.7.10 130.238.164.6/')
    fi
}
start_k3d

## Install Tiller (Helm 2) and give the default kube-system service
## account, which Tiller runs as, cluster-admin rights.
helm init
kubectl create clusterrolebinding default-cluster-admin \
    --clusterrole=cluster-admin \
    --serviceaccount=kube-system:default
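`helm init` returns before the Tiller pod is actually running, and the `helm install` calls in the later scripts will fail until it is. A minimal hedged wait, assuming Helm 2's default `tiller-deploy` deployment in kube-system:

## Block until Tiller's deployment has rolled out.
kubectl -n kube-system rollout status deployment/tiller-deploy --timeout=120s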
#!/usr/bin/env bash
## Generate the LocalEGA and fake-CEGA configuration with legainit, then
## copy the CEGA pieces into the cega chart's config directory.
source _script_source.sh
set -e
pip install git+https://github.com/neicnordic/LocalEGA-deploy-init
if [ ! -d LocalEGA-helm ]; then
git clone git@github.com:NBISweden/LocalEGA-helm.git
fi
legainit --cega \
    --config-path "$CHART_PREFIX/localega/config" \
    --svc-config k3d-conf.json \
    --cega-svc-config k3d-conf.json

mkdir -p "$CHART_PREFIX/cega/config"
cp "$CHART_PREFIX"/localega/config/cega.* "$CHART_PREFIX"/localega/config/users.* "$CHART_PREFIX/cega/config/"
cp -r "$CHART_PREFIX/localega/config/certs" "$CHART_PREFIX/cega/config/"
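legainit writes the certificates and a trace.yml of generated secrets into the config directory. A small hedged sanity check before moving on (file names taken from the deployment scripts below; assumes _script_source.sh sets CHART_PREFIX as the other scripts do):

## Verify the files the deployment scripts depend on were generated.
for f in trace.yml certs/s3.ca.crt certs/s3.ca.key; do
    [ -e "$CHART_PREFIX/localega/config/$f" ] || { echo "missing $f" >&2; exit 1; }
done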
#!/usr/bin/env bash
## Stand up the backing services: an NFS provisioner, a minio S3 backend
## and the fake central EGA, then create the namespaces and volume claims.
source _script_source.sh
set -e
echo " ========="
echo " Installing an NFS server"
helm install --namespace nfs --name nfs stable/nfs-server-provisioner
echo " ========="
echo " Installing a minio backend"
## Pull the S3 credentials that legainit generated out of trace.yml.
MINIO_ACCESS=$(grep s3_access_key "$CHART_PREFIX/localega/config/trace.yml" | awk '{print $2}' | sed 's/"//g')
MINIO_SECRET=$(grep s3_secret_key "$CHART_PREFIX/localega/config/trace.yml" | awk '{print $2}' | sed 's/"//g')
kubectl create ns minio || true
kubectl -n minio create secret generic minio-certs \
    --from-file="$CHART_PREFIX/localega/config/certs/s3.ca.crt" \
    --from-file="$CHART_PREFIX/localega/config/certs/s3.ca.key"
helm install --namespace minio --name s3 \
    --set persistence.enabled=false \
    --set accessKey="$MINIO_ACCESS" \
    --set secretKey="$MINIO_SECRET" \
    --set minioConfig.region=lega \
    --set tls.enabled=true \
    --set tls.publicCrt=s3.ca.crt \
    --set tls.privateKey=s3.ca.key \
    --set tls.certSecret=minio-certs \
    stable/minio --version 2.5.13
until kubectl -n minio get pods -lapp=minio \
        -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' \
        | grep "Ready=True"; do
    echo "waiting for minio to become ready"
    sleep 10
done
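Depending on the minio chart and LocalEGA versions, the lega bucket named in extra-conf.yaml may need to be created by hand. A hedged sketch using the MinIO client (mc) over a temporary port-forward; the alias name and localhost port are illustrative, and --insecure is needed because the s3 certificates are self-signed:

## Hypothetical manual step: create the "lega" bucket in the minio backend.
kubectl -n minio port-forward svc/s3-minio 9000:9000 &
PF_PID=$!
sleep 2
mc --insecure config host add lega https://localhost:9000 "$MINIO_ACCESS" "$MINIO_SECRET"
mc --insecure mb lega/lega || true
kill "$PF_PID"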
echo " ========="
echo " Bringing fake central ega online"
helm install --name cega --namespace cega "$CHART_PREFIX/cega" \
    -f "$CHART_PREFIX/localega/config/trace.yml" \
    --set persistence.enabled=false
kubectl create ns lega || true
kubectl apply -f volume-claims.yaml
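The claims stay Pending until the NFS provisioner binds them, and the lega chart's pods cannot start without their volumes. A hedged wait loop (assumes exactly the three claims from volume-claims.yaml exist in the namespace):

## Wait for all three claims in volume-claims.yaml to be Bound.
until [ "$(kubectl -n lega get pvc -o jsonpath='{.items[*].status.phase}')" = "Bound Bound Bound" ]; do
    echo "waiting for volume claims to bind"
    sleep 5
done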
#!/usr/bin/env bash
## Install the LocalEGA chart, or upgrade it if the release already exists.
source _script_source.sh
set -e
if helm status lega 1>/dev/null 2>&1; then
    helm upgrade lega "$CHART_PREFIX/localega" \
        -f "$CHART_PREFIX/localega/config/trace.yml" \
        -f extra-conf.yaml
else
    helm install --name lega --namespace lega "$CHART_PREFIX/localega" \
        -f "$CHART_PREFIX/localega/config/trace.yml" \
        -f extra-conf.yaml
fi
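To watch the release converge, a hedged readiness loop in the same spirit as the minio one above, scoped to the lega namespace:

## Poll until no pod in the lega namespace is un-Ready (completed jobs
## excluded, mirroring the cluster-wide check in the first script).
until [ "$(kubectl -n lega get pods -o json \
    | jq '[.items[].status.conditions[]? | select(.type == "Ready" and .reason != "PodCompleted" and .status != "True")] | length')" -eq 0 ]; do
    echo "waiting for lega pods to become ready"
    sleep 10
done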
## extra-conf.yaml -- values layered on top of trace.yml for the localega chart.
persistence:
  enabled: true

config:
  cega_mq_host: "cega-mq.cega"
  cega_users_host: "https://cega-users.cega"
  data_storage_s3_bucket: lega
  data_storage_s3_region: lega
  data_storage_url: "https://s3-minio.minio.svc.cluster.local:9000"

inbox:
  persistence:
    existingClaim: nfs-inbox-claim
  deploy: true
  repository: nbisweden/ega-mina-inbox
  imageTag: m7

# Java services
dataedge:
  deploy: true
  repository: nbisweden/ega-dataedge
  imageTag: latest

filedatabase:
  deploy: true
  repository: nbisweden/ega-filedb
  imageTag: latest

keys:
  deploy: true
  repository: nbisweden/ega-keyserver
  imageTag: latest

res:
  deploy: true
  repository: nbisweden/ega-res
  imageTag: latest

doa:
  deploy: true
  repository: nbisweden/ega-doa
  imageTag: latest

# Python services
ingest:
  deploy: true
  coLocateInbox: false
  repository: nbisweden/ega-base
  imageTag: m7
  persistence:
    existingClaim: nfs-inbox-claim

finalize:
  deploy: true
  repository: nbisweden/ega-base
  imageTag: m7

verify:
  deploy: true
  repository: nbisweden/ega-base
  imageTag: m7

# Backing services
mq:
  persistence:
    existingClaim: nfs-mq-claim

postgres:
  persistence:
    existingClaim: nfs-db-claim

tester:
  run: false
  imagePullPolicy: Always
[
  {"name": "s3", "dns": "s3-minio", "ns": "minio"},
  {"name": "keys", "dns": "localega-keys", "ns": "lega"},
  {"name": "filedatabase", "dns": "localega-filedatabase", "ns": "lega"},
  {"name": "dataedge", "dns": "localega-dataedge", "ns": "lega"},
  {"name": "res", "dns": "localega-res", "ns": "lega"},
  {"name": "inbox", "dns": "localega-inbox", "ns": "lega"},
  {"name": "ingest", "dns": "localega-ingest", "ns": "lega"},
  {"name": "finalize", "dns": "localega-finalize", "ns": "lega"},
  {"name": "verify", "dns": "localega-verify", "ns": "lega"},
  {"name": "mq-server", "dns": "localega-mq", "ns": "lega"},
  {"name": "db", "dns": "localega-db", "ns": "lega"},
  {"name": "doa", "dns": "localega-doa", "ns": "lega"},
  {"name": "htsget", "dns": "localega-htsget", "ns": "lega"},
  {"name": "tester", "dns": "localega-tester", "ns": "lega"},
  {"name": "cega-users", "ns": "cega"},
  {"name": "cega-mq", "ns": "cega"}
]
## volume-claims.yaml -- the NFS-backed claims referenced by extra-conf.yaml.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-inbox-claim
  namespace: lega
spec:
  storageClassName: "nfs"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-mq-claim
  namespace: lega
spec:
  storageClassName: "nfs"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Mi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-db-claim
  namespace: lega
spec:
  storageClassName: "nfs"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Mi