From 3ba7973e5e126dfe721a0f9017765a3691798bd9 Mon Sep 17 00:00:00 2001 From: Piers Harding Date: Fri, 1 Jun 2018 10:19:08 +1200 Subject: [PATCH] Add k8s Helm example * fix minikube instructions * reorganise resource descriptor example * add Helm Charts example * document how to use the chart --- Makefile | 12 +- k8s/README.md | 102 ++++++++++++++- k8s/arl-cluster/.helmignore | 21 ++++ k8s/arl-cluster/Chart.yaml | 6 + k8s/arl-cluster/templates/NOTES.txt | 15 +++ k8s/arl-cluster/templates/_helpers.tpl | 32 +++++ .../k8s-dask-notebook-deployment.yml | 71 +++++++++++ .../templates/k8s-dask-notebook-service.yaml | 24 ++++ .../k8s-dask-scheduler-deployment.yml | 105 ++++++++++++++++ .../templates/k8s-dask-scheduler-service.yml | 28 +++++ .../templates/k8s-dask-worker-deployment.yml | 119 ++++++++++++++++++ k8s/arl-cluster/values.yaml | 42 +++++++ .../k8s-dask-notebook-deployment.yml | 0 .../k8s-dask-scheduler-deployment.yml | 0 .../k8s-dask-worker-deployment.yml | 0 15 files changed, 566 insertions(+), 11 deletions(-) create mode 100644 k8s/arl-cluster/.helmignore create mode 100644 k8s/arl-cluster/Chart.yaml create mode 100644 k8s/arl-cluster/templates/NOTES.txt create mode 100644 k8s/arl-cluster/templates/_helpers.tpl create mode 100644 k8s/arl-cluster/templates/k8s-dask-notebook-deployment.yml create mode 100644 k8s/arl-cluster/templates/k8s-dask-notebook-service.yaml create mode 100644 k8s/arl-cluster/templates/k8s-dask-scheduler-deployment.yml create mode 100644 k8s/arl-cluster/templates/k8s-dask-scheduler-service.yml create mode 100644 k8s/arl-cluster/templates/k8s-dask-worker-deployment.yml create mode 100644 k8s/arl-cluster/values.yaml rename k8s/{ => resources}/k8s-dask-notebook-deployment.yml (100%) rename k8s/{ => resources}/k8s-dask-scheduler-deployment.yml (100%) rename k8s/{ => resources}/k8s-dask-worker-deployment.yml (100%) diff --git a/Makefile b/Makefile index a0c5f6d7..198bcd16 100644 --- a/Makefile +++ b/Makefile @@ -109,11 +109,11 @@ endif 
k8s_deploy_scheduler: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ - envsubst < k8s/k8s-dask-scheduler-deployment.yml | kubectl apply -f - + envsubst < k8s/resources/k8s-dask-scheduler-deployment.yml | kubectl apply -f - k8s_delete_scheduler: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ - envsubst < k8s/k8s-dask-scheduler-deployment.yml | kubectl delete -f - || true + envsubst < k8s/resources/k8s-dask-scheduler-deployment.yml | kubectl delete -f - || true k8s_deploy_worker: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ @@ -121,7 +121,7 @@ k8s_deploy_worker: WORKER_REPLICAS=$(WORKER_REPLICAS) \ WORKER_ARL_DATA=$(WORKER_ARL_DATA) \ NFS_SERVER=$(NFS_SERVER) \ - envsubst < k8s/k8s-dask-worker-deployment.yml | kubectl apply -f - + envsubst < k8s/resources/k8s-dask-worker-deployment.yml | kubectl apply -f - k8s_delete_worker: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ @@ -129,19 +129,19 @@ k8s_delete_worker: WORKER_REPLICAS=$(WORKER_REPLICAS) \ WORKER_ARL_DATA=$(WORKER_ARL_DATA) \ NFS_SERVER=$(NFS_SERVER) \ - envsubst < k8s/k8s-dask-worker-deployment.yml | kubectl delete -f - || true + envsubst < k8s/resources/k8s-dask-worker-deployment.yml | kubectl delete -f - || true k8s_deploy_notebook: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ WORKER_ARL_DATA=$(WORKER_ARL_DATA) \ NFS_SERVER=$(NFS_SERVER) \ - envsubst < k8s/k8s-dask-notebook-deployment.yml | kubectl apply -f - + envsubst < k8s/resources/k8s-dask-notebook-deployment.yml | kubectl apply -f - k8s_delete_notebook: DOCKER_IMAGE=$(DOCKER_REPO)$(DOCKER_IMAGE) \ WORKER_ARL_DATA=$(WORKER_ARL_DATA) \ NFS_SERVER=$(NFS_SERVER) \ - envsubst < k8s/k8s-dask-notebook-deployment.yml | kubectl delete -f - || true + envsubst < k8s/resources/k8s-dask-notebook-deployment.yml | kubectl delete -f - || true docker_notebook: docker_build CTNR=`$(DOCKER) ps -q -f name=$(NAME)_notebook` && \ diff --git a/k8s/README.md b/k8s/README.md index 0ff6672f..391cc782 100644 --- a/k8s/README.md +++ b/k8s/README.md @@ -16,23 +16,28 @@ Using 
[Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) en The generic installation instructions are available at https://kubernetes.io/docs/tasks/tools/install-minikube/. -Minikube requires the Kubernetes runtime, and a host virtualisation layer such as kvm, virtualbox etc. +Minikube requires the Kubernetes runtime, and a host virtualisation layer such as kvm, virtualbox etc. Please refer to the drivers list at https://github.com/kubernetes/minikube/blob/master/docs/drivers.md . -On Ubuntu 18.04, the most straight forward installation pattern is to go with kvm as the host virtualisation layer. +On Ubuntu 18.04, the most straightforward installation pattern is to go with kvm as the host virtualisation layer, and use the kvm2 driver. To install [kvm](http://www.linux-kvm.org/page/Main_Page) on Ubuntu it should be simply a case of: ``` sudo apt-get install qemu-kvm libvirt-bin ubuntu-vm-builder bridge-utils ``` The detailed instructions can be found here https://help.ubuntu.com/community/KVM/Installation. -Once kvm is installed, then the latest version of minikube is found here https://github.com/kubernetes/minikube/releases . Scroll down to the section for Linux, which will have instructions like: +Once kvm is installed, we need to install the kvm2 driver with: +``` +curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-kvm2 && chmod +x docker-machine-driver-kvm2 && sudo mv docker-machine-driver-kvm2 /usr/local/bin/ +``` + +Once the kvm2 driver is installed, then the latest version of minikube is found here https://github.com/kubernetes/minikube/releases . 
Scroll down to the section for Linux, which will have instructions like: ``` curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.27.0/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ ``` Now we need to bootstrap minikube so that we have a running cluster based on kvm: ``` -minikube start --vm-driver kvm +minikube start --vm-driver kvm2 ``` This will take some time setting up the vm, and bootstrapping Kubernetes. You will see output like the following when done. ``` @@ -118,8 +123,17 @@ docker run -d --name nfs --privileged -p 2049:2049 \ -e SHARED_DIRECTORY=/arl itsthenetwork/nfs-server-alpine:latest 79351289297f54cbcc2e960c0e09143d3f661c96342f0b4e00d18d72d148281c ``` +Choices +------- + +There are two choices for running the ARL on Kubernetes :- +* Resource descriptors +* Helm Chart -For each of the above describe container types, there is a separate resource descriptor file for - scheduler, worker, and notebook. These can be found in the k8s/ directory. + +Resource Descriptors +-------------------- +For each of the above described container types, there is a separate resource descriptor file for - scheduler, worker, and notebook. These can be found in the [k8s/resources/](k8s/resources/) directory. Launch them all with: ``` make k8s_deploy DOCKER_REPO="" DOCKER_IMAGE=arl_img:latest @@ -159,3 +173,81 @@ Tear down the test with: ``` make k8s_delete DOCKER_REPO="" DOCKER_IMAGE=arl_img:latest ``` + +Helm Chart +---------- + +First you must install [Helm](https://docs.helm.sh/using_helm/#installing-helm), the easiest way is using the install script: ``` +curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash +``` +You must initialise Helm, with `helm init`. This will ensure that the [Tiller](https://docs.helm.sh/glossary/#tiller) component is running which is the Kubernetes API server proxy for Helm. Check this is running correctly with `helm version`. 
+ +Once Helm is up and running, change to the k8s/arl-cluster/ directory and check the values in the values.yaml file, in particular the following: +``` +... +worker: + replicaCount: 1 + +image: + repository: arl_img + tag: latest + pullPolicy: IfNotPresent + +jupyter: + password: changeme + +nfs: + server: 192.168.0.168 +... +resources: + limits: + cpu: 500m # 500m = 0.5 CPU + memory: 512Mi # 512Mi = 0.5 GB mem +... +``` +As with the above instructions for Resource Descriptors, the image location must be set to something accessible by every node in Kubernetes. +Change the worker.replicaCount and resources values to something more desirable for your cluster. + +Change directory back to k8s/ and launch helm: +``` +helm install --name <release-name> arl-cluster/ +``` +Individual values from the values.yaml file can be overridden with: `--set worker.replicaCount=10,resources.limits.cpu=1000m,resources.limits.memory=4096Mi` etc. + +You will get output like the following: +``` +k8s$ helm install --name test arl-cluster/ +NAME: test +LAST DEPLOYED: Fri Jun 1 10:02:58 2018 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1/Service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +notebook-test-arl-cluster ClusterIP 10.108.56.174 8888/TCP 0s +dask-scheduler-test-arl-cluster ClusterIP 10.101.121.34 8786/TCP,8787/TCP 0s + +==> v1/Deployment +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +notebook 1 1 1 0 0s +dask-scheduler 1 1 1 0 0s +dask-worker-test-arl-cluster 1 1 1 0 0s + +==> v1/Pod(related) +NAME READY STATUS RESTARTS AGE +notebook-565795c79f-rgjxg 0/1 ContainerCreating 0 0s +dask-scheduler-68dbfd8fbb-hl6g6 0/1 ContainerCreating 0 0s +dask-worker-test-arl-cluster-6f848cdb6d-2x5v2 0/1 ContainerCreating 0 0s + + +NOTES: +Get the Jupyter Notebook application URL by running these commands: +1. Calculate and export the POD_NAME: + export POD_NAME=$(kubectl get pods --namespace default -l "app=notebook-arl-cluster,release=test" -o jsonpath="{.items[0].metadata.name}") +2. 
Forward local port 8080 to Jupyter on the POD with: + kubectl port-forward $POD_NAME 8080:8888 +3. Visit http://127.0.0.1:8080 to use your application +``` +Follow the NOTES instructions for accessing the Jupyter Notebook service diff --git a/k8s/arl-cluster/.helmignore b/k8s/arl-cluster/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/k8s/arl-cluster/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/k8s/arl-cluster/Chart.yaml b/k8s/arl-cluster/Chart.yaml new file mode 100644 index 00000000..2af24302 --- /dev/null +++ b/k8s/arl-cluster/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for the ARL on Kubernetes +name: arl-cluster +version: 0.1.0 +icon: https://www.skatelescope.org/wp-content/uploads/2012/05/SDPLOGO-e1443519033747.jpg diff --git a/k8s/arl-cluster/templates/NOTES.txt b/k8s/arl-cluster/templates/NOTES.txt new file mode 100644 index 00000000..edadb5a7 --- /dev/null +++ b/k8s/arl-cluster/templates/NOTES.txt @@ -0,0 +1,15 @@ +Get the Jupyter Notebook application URL by running these commands: +{{- if contains "NodePort" .Values.notebook.service.type }} +1. Calculate and export the NODE_PORT: + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services notebook-{{ template "arl-cluster.fullname" . }}) +2. Calculate and export the NODE_IP: + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") +3. 
Display the URL for the Jupyter Notebook service: + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "ClusterIP" .Values.notebook.service.type }} +1. Calculate and export the POD_NAME: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app=notebook-{{ template "arl-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +2. Forward local port 8080 to Jupyter on the POD with: + kubectl port-forward $POD_NAME 8080:{{ .Values.notebook.service.jupyter.port }} +3. Visit http://127.0.0.1:8080 to use your application +{{- end }} diff --git a/k8s/arl-cluster/templates/_helpers.tpl b/k8s/arl-cluster/templates/_helpers.tpl new file mode 100644 index 00000000..e78eff9a --- /dev/null +++ b/k8s/arl-cluster/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "arl-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "arl-cluster.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "arl-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/k8s/arl-cluster/templates/k8s-dask-notebook-deployment.yml b/k8s/arl-cluster/templates/k8s-dask-notebook-deployment.yml new file mode 100644 index 00000000..554cc276 --- /dev/null +++ b/k8s/arl-cluster/templates/k8s-dask-notebook-deployment.yml @@ -0,0 +1,71 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notebook + namespace: default + labels: + app: notebook-{{ template "arl-cluster.name" . }} + chart: {{ template "arl-cluster.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: notebook-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} + replicas: 1 + template: + metadata: + labels: + k8s-app: notebook-{{ template "arl-cluster.name" . }} + app: notebook-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: notebook-{{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: ARL_DASK_SCHEDULER + value: dask-scheduler-{{ template "arl-cluster.fullname" . }}.default.svc.cluster.local:8786 + - name: JUPYTER_PASSWORD + value: "{{ .Values.jupyter.password }}" + - name: NOTEBOOK_PORT + value: "8888" + volumeMounts: + - mountPath: /arl/data + name: arldata + readOnly: false + ports: + - name: jupyter + containerPort: 8888 + readinessProbe: + httpGet: + path: /api + port: 8888 + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 20 + failureThreshold: 3 + volumes: + # - hostPath: + # path: ${WORKER_ARL_DATA} + # type: DirectoryOrCreate + # name: arldata + - name: arldata + nfs: + server: "{{ .Values.nfs.server }}" + path: "/data" + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/k8s/arl-cluster/templates/k8s-dask-notebook-service.yaml b/k8s/arl-cluster/templates/k8s-dask-notebook-service.yaml new file mode 100644 index 00000000..3c4c423f --- /dev/null +++ b/k8s/arl-cluster/templates/k8s-dask-notebook-service.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: notebook-{{ template "arl-cluster.fullname" . }} + labels: + app: notebook-{{ template "arl-cluster.fullname" . }} + chart: {{ template "arl-cluster.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + # k8s-app: notebook-{{ template "arl-cluster.fullname" . }} +spec: + type: {{ .Values.notebook.service.type }} + ports: + - name: jupyter + port: {{ .Values.notebook.service.jupyter.port }} + targetPort: jupyter + protocol: TCP + selector: + # k8s-app: notebook-{{ template "arl-cluster.name" . }} + app: notebook-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} diff --git a/k8s/arl-cluster/templates/k8s-dask-scheduler-deployment.yml b/k8s/arl-cluster/templates/k8s-dask-scheduler-deployment.yml new file mode 100644 index 00000000..6daa502c --- /dev/null +++ b/k8s/arl-cluster/templates/k8s-dask-scheduler-deployment.yml @@ -0,0 +1,105 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dask-scheduler + namespace: default + labels: + app: dask-scheduler-{{ template "arl-cluster.name" . }} + chart: {{ template "arl-cluster.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: dask-scheduler-{{ template "arl-cluster.name" . 
}} + release: {{ .Release.Name }} + replicas: 1 + template: + metadata: + labels: + k8s-app: dask-scheduler-{{ template "arl-cluster.name" . }} + app: dask-scheduler-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: scheduler + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /usr/local/bin/start-dask-scheduler.sh + env: + - name: DASK_HOST_NAME + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DASK_SCHEDULER + value: dask-scheduler-{{ template "arl-cluster.fullname" . }}.default.svc.cluster.local + - name: DASK_PORT_SCHEDULER + value: "8786" + - name: DASK_PORT_BOKEH + value: "8787" + - name: DASK_BOKEH_WHITELIST + value: dask-scheduler-{{ template "arl-cluster.fullname" . }}.default.svc.cluster.local + - name: DASK_BOKEH_APP_PREFIX + value: "/" + - name: DASK_LOCAL_DIRECTORY + value: "/var/tmp" + - name: K8S_APP_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DASK_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: DASK_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DASK_SCHEDULER + value: dask-scheduler-{{ template "arl-cluster.fullname" . }}.default.svc.cluster.local + - name: DASK_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: scheduler + resource: limits.cpu + - name: DASK_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: scheduler + resource: limits.memory + volumeMounts: + - mountPath: /var/tmp + name: localdir + readOnly: false + ports: + - name: scheduler + containerPort: 8786 + - name: bokeh + containerPort: 8787 + readinessProbe: + httpGet: + path: /json/identity.json + port: 8787 + initialDelaySeconds: 60 + timeoutSeconds: 10 + periodSeconds: 20 + failureThreshold: 3 + volumes: + - hostPath: + path: /var/tmp + type: DirectoryOrCreate + name: localdir + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/k8s/arl-cluster/templates/k8s-dask-scheduler-service.yml b/k8s/arl-cluster/templates/k8s-dask-scheduler-service.yml new file mode 100644 index 00000000..aea9c1c0 --- /dev/null +++ b/k8s/arl-cluster/templates/k8s-dask-scheduler-service.yml @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: dask-scheduler-{{ template "arl-cluster.fullname" . }} + labels: + app: dask-scheduler-{{ template "arl-cluster.fullname" . }} + chart: {{ template "arl-cluster.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + # k8s-app: dask-scheduler-{{ template "arl-cluster.fullname" . }} +spec: + type: {{ .Values.daskscheduler.service.type }} + ports: + - name: scheduler + port: {{ .Values.daskscheduler.service.scheduler.port }} + targetPort: scheduler + protocol: TCP + - name: bokeh + port: {{ .Values.daskscheduler.service.bokeh.port }} + targetPort: bokeh + protocol: TCP + selector: + # k8s-app: dask-scheduler-{{ template "arl-cluster.name" . }} + app: dask-scheduler-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} diff --git a/k8s/arl-cluster/templates/k8s-dask-worker-deployment.yml b/k8s/arl-cluster/templates/k8s-dask-worker-deployment.yml new file mode 100644 index 00000000..5f766d6a --- /dev/null +++ b/k8s/arl-cluster/templates/k8s-dask-worker-deployment.yml @@ -0,0 +1,119 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dask-worker-{{ template "arl-cluster.fullname" . }} + namespace: default + labels: + app: dask-worker-{{ template "arl-cluster.name" . }} + chart: {{ template "arl-cluster.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: dask-worker-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.worker.replicaCount }} + template: + metadata: + labels: + k8s-app: dask-worker-{{ template "arl-cluster.name" . }} + app: dask-worker-{{ template "arl-cluster.name" . }} + release: {{ .Release.Name }} + spec: + # hostNetwork: true + containers: + - name: worker + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: +{{ toYaml .Values.resources | indent 10 }} + command: + - /usr/local/bin/start-dask-worker.sh + env: + - name: DASK_HOST_NAME + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DASK_SCHEDULER + value: dask-scheduler-{{ template "arl-cluster.fullname" . }}.default.svc.cluster.local + - name: DASK_PORT_NANNY + value: "8789" + - name: DASK_PORT_WORKER + value: "8788" + - name: DASK_PORT_SCHEDULER + value: "8786" + - name: DASK_PORT_BOKEH + value: "8787" + - name: DASK_LOCAL_DIRECTORY + value: "/var/tmp" + - name: DASK_RESOURCES + value: "" + - name: K8S_APP_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DASK_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: DASK_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DASK_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: worker + resource: limits.cpu + - name: DASK_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: worker + resource: limits.memory + volumeMounts: + - mountPath: /var/tmp + name: localdir + readOnly: false + - mountPath: /arl/data + name: arldata + readOnly: false + ports: + - name: worker + containerPort: 8786 + - name: bokeh + containerPort: 8787 + readinessProbe: + httpGet: + path: /json/identity.json + port: 8787 + initialDelaySeconds: 60 + timeoutSeconds: 10 + periodSeconds: 20 + failureThreshold: 3 + volumes: + - hostPath: + path: 
/var/tmp + type: DirectoryOrCreate + name: localdir + # - hostPath: + # path: ${WORKER_ARL_DATA} + # type: DirectoryOrCreate + # name: arldata + - name: arldata + nfs: + server: "{{ .Values.nfs.server }}" + path: "/data" + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/k8s/arl-cluster/values.yaml b/k8s/arl-cluster/values.yaml new file mode 100644 index 00000000..73385898 --- /dev/null +++ b/k8s/arl-cluster/values.yaml @@ -0,0 +1,42 @@ +# Default values for arl-cluster. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +worker: + replicaCount: 1 + +image: + repository: arl_img + tag: latest + pullPolicy: IfNotPresent + +jupyter: + password: changeme + +nfs: + server: 192.168.0.168 + +notebook: + service: + type: ClusterIP # NodePort or ClusterIP + jupyter: + port: 8888 + +daskscheduler: + service: + type: ClusterIP + scheduler: + port: 8786 + bokeh: + port: 8787 + +resources: + limits: + cpu: 500m # 500m = 0.5 CPU + memory: 512Mi # 512Mi = 0.5 GB mem + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/k8s/k8s-dask-notebook-deployment.yml b/k8s/resources/k8s-dask-notebook-deployment.yml similarity index 100% rename from k8s/k8s-dask-notebook-deployment.yml rename to k8s/resources/k8s-dask-notebook-deployment.yml diff --git a/k8s/k8s-dask-scheduler-deployment.yml b/k8s/resources/k8s-dask-scheduler-deployment.yml similarity index 100% rename from k8s/k8s-dask-scheduler-deployment.yml rename to k8s/resources/k8s-dask-scheduler-deployment.yml diff --git a/k8s/k8s-dask-worker-deployment.yml b/k8s/resources/k8s-dask-worker-deployment.yml similarity index 100% rename from k8s/k8s-dask-worker-deployment.yml rename to k8s/resources/k8s-dask-worker-deployment.yml