[documentation][deploy] Improve helm deployment script to deploy Pulsar to minikube (#2363)
* [documentation][deploy] Update deployment instructions for deploying to Minikube
* Enable functions workers
* [documentation][deploy] Improve helm deployment script to deploy Pulsar to minikube

### Changes

- update the helm scripts: bookie/autorecovery/broker pods should wait until metadata is initialized
- disable `autoRecovery` on bookies since we start `AutoRecovery` in separate pods
- enable function worker on brokers
- provide a values file for minikube
- update documentation for using helm chart to deploy a cluster to minikube

* move the service type definition to values file
parent 7cfbe4a415, commit 483107e2b9

README.md (+54 lines)
@@ -21,3 +21,57 @@

This directory contains the Helm Chart required to do a complete Pulsar deployment on Kubernetes.
## Install Helm

Before you start, you need to install Helm. Follow the [helm documentation](https://docs.helm.sh/using_helm/#installing-helm) to install it.
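This chart targets Helm 2 (note the name-less `helm install` used later in this README), so the cluster side of Helm also needs to be initialized once before any chart can be installed. A minimal sketch, assuming a standard Helm 2 client:

```shell
# Sketch, assuming Helm 2: initialize Tiller in the cluster after
# installing the helm client.
helm init

# Wait until the tiller-deploy pod is Running before installing charts.
kubectl get pods --namespace kube-system -l name=tiller
```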
## Deploy Pulsar

### Minikube

#### Install Minikube

[Install and configure minikube](https://github.com/kubernetes/minikube#installation) with a [VM driver](https://github.com/kubernetes/minikube#requirements), e.g. `kvm2` on Linux, or `hyperkit` or `VirtualBox` on macOS.
#### Create a K8S cluster on Minikube

```
minikube start --memory=8192 --cpus=4 \
    --kubernetes-version=v1.10.5
```
#### Set kubectl to use Minikube

```
kubectl config use-context minikube
```
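Before going further, you can verify that `kubectl` is pointed at the minikube cluster:

```shell
# Sketch: the single minikube node should report Ready.
kubectl get nodes
```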
After you have created a K8S cluster on Minikube, you can access its dashboard via the following command:

```
minikube dashboard
```

The command will automatically open a web page in your browser.
#### Install Pulsar Chart

Assume you have already cloned the pulsar repo into the `PULSAR_HOME` directory.

1. Go to the Pulsar helm chart directory:
   ```shell
   cd ${PULSAR_HOME}/deployment/kubernetes/helm
   ```
1. Install the helm chart:
   ```shell
   helm install --values pulsar/values-mini.yaml ./pulsar
   ```
Once the helm chart installation completes, you can access the cluster via:

- Web service url: `http://$(minikube ip):30001/`
- Pulsar service url: `pulsar://$(minikube ip):30002/`
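As a quick smoke test of both endpoints, you can produce a message through the proxy's NodePort. This is a sketch assuming a local Pulsar binary distribution providing the `pulsar-client` CLI; the topic name is illustrative:

```shell
# Sketch: produce a test message through the proxy NodePort.
bin/pulsar-client --url pulsar://$(minikube ip):30002/ \
  produce my-topic --messages "hello-pulsar"

# Sketch: the admin REST API answers on the web service port.
curl http://$(minikube ip):30001/admin/v2/clusters
```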
@@ -83,7 +83,7 @@ spec:
         command: ["sh", "-c"]
         args:
           - >-
-            until nslookup {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ add (.Values.zookeeper.replicaCount | int) -1 }}.{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}.{{ .Values.namespace }}; do
+            until bin/pulsar zookeeper-shell -server {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }} ls /admin/clusters/{{ template "pulsar.fullname" . }}; do
             sleep 3;
             done;
       containers:
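This hunk (repeated below for the other components) changes the init containers to wait until the cluster metadata exists in ZooKeeper, instead of merely waiting for DNS of the last zookeeper pod. A sketch of the equivalent manual check, using an illustrative release name `pulsar`:

```shell
# Sketch: the wait loop exits once the cluster znode exists, i.e. once
# "pulsar initialize-cluster-metadata" has completed. Names illustrative.
bin/pulsar zookeeper-shell -server pulsar-zookeeper ls /admin/clusters/pulsar
```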
@@ -33,4 +33,6 @@ data:
   zkServers:
   {{- $global := . }}
   {{ range $i, $e := until (.Values.zookeeper.replicaCount | int) }}{{ if ne $i 0 }},{{ end }}{{ printf "%s-%s-%s-%d.%s-%s-%s" $global.Release.Name $global.Chart.Name $global.Values.zookeeper.component $i $global.Release.Name $global.Chart.Name $global.Values.zookeeper.component }}{{ end }}
+  # disable auto recovery on bookies since we will start AutoRecovery in separate pods
+  autoRecoveryDaemonEnabled: "false"
   {{ toYaml .Values.bookkeeper.configData | indent 2 }}
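With the daemon disabled on the bookies themselves, ledger re-replication is handled by the dedicated auto-recovery pods. A sketch of the command those pods effectively run, assuming the standard BookKeeper CLI bundled in the Pulsar image:

```shell
# Sketch: run the BookKeeper auto-recovery daemon as a standalone process,
# which is what the dedicated autorecovery pods do.
bin/bookkeeper autorecovery
```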
@@ -86,7 +86,7 @@ spec:
         command: ["sh", "-c"]
         args:
           - >-
-            until nslookup {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ add (.Values.zookeeper.replicaCount | int) -1 }}.{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}.{{ .Values.namespace }}; do
+            until bin/pulsar zookeeper-shell -server {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }} ls /admin/clusters/{{ template "pulsar.fullname" . }}; do
             sleep 3;
             done;
       # This initContainer will make sure that the bookkeeper
@@ -37,4 +37,6 @@ data:
   {{- $global := . }}
   {{ range $i, $e := until (.Values.zookeeper.replicaCount | int) }}{{ if ne $i 0 }},{{ end }}{{ printf "%s-%s-%s-%d.%s-%s-%s" $global.Release.Name $global.Chart.Name $global.Values.zookeeper.component $i $global.Release.Name $global.Chart.Name $global.Values.zookeeper.component }}{{ end }}
   clusterName: {{ template "pulsar.fullname" . }}
+  functionsWorkerEnabled: "true"
+  PF_pulsarFunctionsCluster: {{ template "pulsar.fullname" . }}
   {{ toYaml .Values.broker.configData | indent 2 }}
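With the function workers now running inside the brokers, functions can be managed through `pulsar-admin` once the cluster is up. A hedged sketch; the jar path, class name, topic names, and flag spellings (which vary slightly across Pulsar versions) are illustrative:

```shell
# Sketch: deploy a function to the broker-hosted function workers.
bin/pulsar-admin functions create \
  --jar my-functions.jar \
  --classname org.example.ExclamationFunction \
  --inputs persistent://public/default/in \
  --output persistent://public/default/out \
  --name exclamation
```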
@@ -82,7 +82,7 @@ spec:
         command: ["sh", "-c"]
         args:
           - >-
-            until nslookup {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ add (.Values.zookeeper.replicaCount | int) -1 }}.{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}.{{ .Values.namespace }}; do
+            until bin/pulsar zookeeper-shell -server {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }} ls /admin/clusters/{{ template "pulsar.fullname" . }}; do
             sleep 3;
             done;
       containers:
@@ -98,6 +98,7 @@ spec:
           - >
             bin/apply-config-from-env.py conf/broker.conf &&
             bin/apply-config-from-env.py conf/pulsar_env.sh &&
+            bin/gen-yml-from-env.py conf/functions_worker.yml &&
             bin/pulsar broker
         ports:
           - name: http
@@ -76,7 +76,13 @@ spec:
       - name: "{{ template "pulsar.fullname" . }}-{{ .Values.prometheus.component }}-config"
         configMap:
           name: "{{ template "pulsar.fullname" . }}-{{ .Values.prometheus.component }}"
+    {{- if not .Values.prometheus_persistence }}
       - name: "{{ template "pulsar.fullname" . }}-{{ .Values.prometheus.component }}-{{ .Values.prometheus.volumes.data.name }}"
+        emptyDir: {}
+    {{- end }}
+    {{- if .Values.prometheus_persistence }}
+      - name: "{{ template "pulsar.fullname" . }}-{{ .Values.prometheus.component }}-{{ .Values.prometheus.volumes.data.name }}"
         persistentVolumeClaim:
           claimName: "{{ template "pulsar.fullname" . }}-{{ .Values.prometheus.component }}-{{ .Values.prometheus.volumes.data.name }}"
+    {{- end }}
 {{- end }}
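The Prometheus data volume now honors the `prometheus_persistence` flag, so test clusters can skip the PersistentVolumeClaim. A sketch of toggling it at install time:

```shell
# Sketch: keep Prometheus data on an ephemeral emptyDir for a throwaway
# test cluster instead of claiming a persistent volume.
helm install --set prometheus_persistence=false ./pulsar
```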
@@ -83,7 +83,7 @@ spec:
         command: ["sh", "-c"]
         args:
           - >-
-            until nslookup {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ add (.Values.zookeeper.replicaCount | int) -1 }}.{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}.{{ .Values.namespace }}; do
+            until bin/pulsar zookeeper-shell -server {{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }} ls /admin/clusters/{{ template "pulsar.fullname" . }}; do
             sleep 3;
             done;
       containers:
@@ -33,7 +33,7 @@ metadata:
   annotations:
{{ toYaml .Values.proxy.service.annotations | indent 4 }}
 spec:
-  type: NodePort
+  type: {{ .Values.proxy.service.type }}
   ports:
{{ toYaml .Values.proxy.service.ports | indent 2 }}
   selector:
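Since the proxy service type is now read from the values file rather than hard-coded, it can be overridden per environment. A sketch:

```shell
# Sketch: expose the proxy through a LoadBalancer instead of the
# default NodePort by overriding the value at install time.
helm install --set proxy.service.type=LoadBalancer ./pulsar
```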
@@ -46,6 +46,10 @@ spec:
       - name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeperMetadata.component }}"
         image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
         imagePullPolicy: {{ .Values.image.pullPolicy }}
+      {{- if .Values.zookeeper_metadata.resources }}
+        resources:
+{{ toYaml .Values.zookeeper_metadata.resources | indent 10 }}
+      {{- end }}
         command: ["sh", "-c"]
         args:
           - >
pulsar/values-mini.yaml (new file, +388 lines)

@@ -0,0 +1,388 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
## Namespace to deploy pulsar
namespace: pulsar
namespaceCreate: yes

## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims, otherwise, for test
## purposes, they will be deployed with emptyDir
persistence: no

## If prometheus_persistence is enabled, prometheus will be deployed
## with PersistentVolumeClaims, otherwise, for test purposes, they
## will be deployed with emptyDir
prometheus_persistence: no

## which extra components to deploy
extra:
  # Pulsar proxy
  proxy: yes
  # Bookkeeper auto-recovery
  autoRecovery: yes
  # Pulsar dashboard
  dashboard: yes
  # Bastion pod for administrative commands
  bastion: yes
  # Monitoring stack (prometheus and grafana)
  monitoring: yes

## Which pulsar image to use
image:
  repository: apachepulsar/pulsar-all
  tag: latest
  pullPolicy: IfNotPresent
## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  component: zookeeper
  replicaCount: 3
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.1
  volumes:
    data:
      name: data
      size: 2Gi
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms64m -Xmx128m -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations:
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    ports:
      - name: server
        port: 2888
      - name: leader-election
        port: 3888
      - name: stats
        port: 2181
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depend on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc. will only start once the
## zookeeper cluster is ready and the metadata has been deployed.
zookeeperMetadata:
  component: zookeeper-metadata
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  component: bookkeeper
  replicaCount: 3
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 128Mi
      cpu: 0.2
  volumes:
    journal:
      name: journal
      size: 5Gi
    ledgers:
      name: ledgers
      size: 5Gi
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms128m -Xmx256m -XX:MaxDirectMemorySize=128m -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -verbosegc -XX:G1LogLevel=finest\""
    dbStorage_writeCacheMaxSizeMb: "32"
    dbStorage_readAheadCacheMaxSizeMb: "32"
    journalMaxSizeMB: "2048"
    statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
    useHostNameAsBookieID: "true"
  ## Bookkeeper service
  ## templates/bookkeeper-service.yaml
  ##
  service:
    annotations:
      publishNotReadyAddresses: "true"
    ports:
      - name: server
        port: 3181
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
broker:
  component: broker
  replicaCount: 2
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 128Mi
      cpu: 0.2
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms128m -Xmx256m -XX:MaxDirectMemorySize=128m -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    managedLedgerDefaultEnsembleSize: "2"
    managedLedgerDefaultWriteQuorum: "2"
    managedLedgerDefaultAckQuorum: "2"
    deduplicationEnabled: "false"
    exposeTopicLevelMetricsInPrometheus: "true"
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: http
        port: 8080
      - name: pulsar
        port: 6650
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Extra: Proxy
## templates/proxy-deployment.yaml
##
proxy:
  component: proxy
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.1
  ## Proxy configmap
  ## templates/proxy-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms64m -Xmx128m -XX:MaxDirectMemorySize=64m\""
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  service:
    annotations: {}
    type: NodePort
    ports:
      - name: http
        port: 8080
        nodePort: 30001
        protocol: TCP
      - name: tcp
        port: 6650
        nodePort: 30002
        protocol: TCP
  ## Proxy PodDisruptionBudget
  ## templates/proxy-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Extra: Bookkeeper auto-recovery
## templates/autorecovery-deployment.yaml
##
autoRecovery:
  component: autorecovery
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.05
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\" -Xms64m -Xmx128m \""
## Pulsar Extra: Dashboard
## templates/dashboard-deployment.yaml
##
dashboard:
  component: dashboard
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.1
  image:
    repository: apachepulsar/pulsar-dashboard
    tag: latest
    pullPolicy: IfNotPresent
  ## Dashboard service
  ## templates/dashboard-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 80
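The dashboard service is only exposed inside the cluster on port 80, so on minikube the simplest way to reach it is a port-forward. A sketch; the service name follows the chart's `<release>-<chart>-<component>` naming and is illustrative here:

```shell
# Sketch: forward a local port to the dashboard service, then browse
# http://localhost:8001/. The service name is illustrative.
kubectl port-forward -n pulsar svc/my-release-pulsar-dashboard 8001:80
```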
## Pulsar Extra: Bastion
## templates/bastion-deployment.yaml
##
bastion:
  component: bastion
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 128Mi
      cpu: 0.1
  ## Bastion configmap
  ## templates/bastion-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms128m -Xmx256m -XX:MaxDirectMemorySize=128m\""
## Monitoring Stack: Prometheus
## templates/prometheus-deployment.yaml
##
prometheus:
  component: prometheus
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.1
  image:
    repository: prom/prometheus
    tag: v1.6.3
    pullPolicy: IfNotPresent
  volumes:
    data:
      name: data
      size: 2Gi
  ## Prometheus service
  ## templates/prometheus-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 9090
## Monitoring Stack: Grafana
## templates/grafana-deployment.yaml
##
grafana:
  component: grafana
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 64Mi
      cpu: 0.1
  image:
    repository: apachepulsar/pulsar-grafana
    tag: latest
    pullPolicy: IfNotPresent
  ## Grafana service
  ## templates/grafana-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 3000

zookeeper_metadata:
  resources:
    requests:
      memory: 128Mi
      cpu: 0.1
@@ -23,9 +23,14 @@ namespaceCreate: yes
 
 ## If persistence is enabled, components that has state will
 ## be deployed with PersistentVolumeClaims, otherwise, for test
-## purposes, they will be deployed with emptDir
+## purposes, they will be deployed with emptyDir
 persistence: no
 
+## If prometheus_persistence is enabled, prometheus will be deployed
+## with PersistentVolumeClaims, otherwise, for test purposes, they
+## will be deployed with emptyDir
+prometheus_persistence: yes
+
 ## which extra components to deploy
 extra:
   # Pulsar proxy
@@ -41,7 +46,7 @@ extra:
 
 ## Which pulsar image to use
 image:
-  repository: apachepulsar/pulsar
+  repository: apachepulsar/pulsar-all
   tag: latest
   pullPolicy: IfNotPresent
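The default values file now also uses the `apachepulsar/pulsar-all` image, which matters because the function workers enabled above rely on components (such as the builtin connectors) that the slimmer `apachepulsar/pulsar` image presumably does not ship. A sketch of the corresponding default install:

```shell
# Sketch: install with the default (non-minikube) values, which now
# pull the apachepulsar/pulsar-all image as well.
helm install ./pulsar
```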