John Harris 4efddf92c5 [Issue 6355][HELM] autorecovery - could not find or load main class (#6373)
This applies the recommended fix from
https://github.com/apache/pulsar/issues/6355#issuecomment-587756717

Fixes #6355

### Motivation

This PR corrects the configmap data that was causing the autorecovery pod to crash-loop
with `could not find or load main class`.

### Modifications

Updated the configmap variable data per [this comment](https://github.com/apache/pulsar/issues/6355#issuecomment-587756717) from @sijie.
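
The relevant change, as it now reads in the values file below, is the autorecovery configmap entry; the escaped inner quotes and surrounding spaces in the value are the substance of the fix (see the linked comment for context):

```yaml
## Corrected autorecovery configmap entry (excerpt from the values file below)
autoRecovery:
  configData:
    BOOKIE_MEM: "\" -Xms1g -Xmx1g \""
```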
2020-02-21 22:07:10 -08:00

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
## Namespace to deploy pulsar
namespace: pulsar
namespaceCreate: yes
## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims; otherwise, for test
## purposes, they will be deployed with emptyDir volumes
persistence: no
## If prometheus_persistence is enabled, prometheus will be deployed
## with a PersistentVolumeClaim; otherwise, for test purposes, it
## will be deployed with an emptyDir volume
prometheus_persistence: yes
prometheus_rbac: yes
## which extra components to deploy
extra:
  # Pulsar proxy
  proxy: yes
  # Bookkeeper auto-recovery
  autoRecovery: yes
  # Pulsar dashboard
  # Deprecated
  # Replace pulsar-dashboard with pulsar-manager
  dashboard: no
  # pulsar manager
  pulsar_manager: yes
  # Bastion pod for administrative commands
  bastion: yes
  # Monitoring stack (prometheus and grafana)
  monitoring: yes
  # Configure Kubernetes runtime for Functions
  functionsAsPods: no
## Which pulsar image to use
image:
  repository: apachepulsar/pulsar-all
  tag: latest
  pullPolicy: IfNotPresent
## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  component: zookeeper
  replicaCount: 3
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    data:
      name: data
      size: 20Gi
      ## If you already have an existing storage class and want to reuse it,
      ## you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below.
      ## If left undefined, no storage class will be defined along with the PVC.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
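    ## Note: each value is wrapped in escaped quotes, which appears intended
    ## to keep the whole flag string a single shell word when the container
    ## scripts expand these variables; incorrect configmap data of this kind
    ## is what this commit fixes for the autorecovery component.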
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations:
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    ports:
    - name: server
      port: 2888
    - name: leader-election
      port: 3888
    - name: stats
      port: 2181
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. Other components
## that depend on zookeeper, such as the bookkeeper and broker
## nodes, will only start to be deployed once the zookeeper
## cluster is ready and the metadata has been deployed.
zookeeperMetadata:
  component: zookeeper-metadata
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  component: bookkeeper
  replicaCount: 4
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    journal:
      name: journal
      size: 50Gi
      ## If you already have an existing storage class and want to reuse it,
      ## you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below.
      ## If left undefined, no storage class will be defined along with the PVC.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
    ledgers:
      name: ledgers
      size: 50Gi
      ## If you already have an existing storage class and want to reuse it,
      ## you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below.
      ## If left undefined, no storage class will be defined along with the PVC.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    BOOKIE_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -verbosegc -XX:G1LogLevel=finest\""
    BOOKIE_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    PULSAR_PREFIX_dbStorage_writeCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_readAheadCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_rocksDB_blockCacheSize: "268435456"
    PULSAR_PREFIX_journalMaxSizeMB: "2048"
    PULSAR_PREFIX_statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
    PULSAR_PREFIX_useHostNameAsBookieID: "true"
  ## Bookkeeper service
  ## templates/bookkeeper-service.yaml
  ##
  service:
    annotations:
      publishNotReadyAddresses: "true"
    ports:
    - name: server
      port: 3181
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
broker:
  component: broker
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    PULSAR_PREFIX_managedLedgerDefaultEnsembleSize: "3"
    PULSAR_PREFIX_managedLedgerDefaultWriteQuorum: "3"
    PULSAR_PREFIX_managedLedgerDefaultAckQuorum: "2"
    PULSAR_PREFIX_deduplicationEnabled: "false"
    PULSAR_PREFIX_exposeTopicLevelMetricsInPrometheus: "true"
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: http
      port: 8080
    - name: pulsar
      port: 6650
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Broker rbac
## templates/broker-rbac.yaml
##
functions:
  component: functions-worker
## Pulsar Extra: Proxy
## templates/proxy-deployment.yaml
##
proxy:
  component: proxy
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Proxy configmap
  ## templates/proxy-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms4g -Xmx4g -XX:MaxDirectMemorySize=4g\""
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  service:
    annotations: {}
    type: NodePort
    ports:
    - name: http
      port: 8080
      nodePort: 30001
      protocol: TCP
    - name: tcp
      port: 6650
      nodePort: 30002
      protocol: TCP
  ## Proxy PodDisruptionBudget
  ## templates/proxy-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Extra: Bookkeeper auto-recovery
## templates/autorecovery-deployment.yaml
##
autoRecovery:
  component: autorecovery
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    BOOKIE_MEM: "\" -Xms1g -Xmx1g \""
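    ## The escaped quotes and surrounding spaces above are deliberate: this is
    ## the corrected value applied by this commit (see
    ## https://github.com/apache/pulsar/issues/6355); the previous value made
    ## the pod crash-loop with "could not find or load main class".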
## Pulsar Extra: Dashboard
## templates/dashboard-deployment.yaml
## Deprecated
##
dashboard:
  component: dashboard
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-dashboard
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Dashboard service
  ## templates/dashboard-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 80
  ingress:
    enabled: false
    annotations: {}
    tls:
      enabled: false
      ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
      secretName: ""
    ## Required if ingress is enabled
    hostname: ""
    path: "/"
    port: 80
## Pulsar Extra: Bastion
## templates/bastion-deployment.yaml
##
bastion:
  component: bastion
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bastion configmap
  ## templates/bastion-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g\""
## Monitoring Stack: Prometheus
## templates/prometheus-deployment.yaml
##
prometheus:
  component: prometheus
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: prom/prometheus
    tag: v1.6.3
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  volumes:
    data:
      name: data
      size: 50Gi
      ## If you already have an existing storage class and want to reuse it,
      ## you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below.
      ## If left undefined, no storage class will be defined along with the PVC.
      ##
      # storageClass:
      #   type: pd-standard
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Prometheus service
  ## templates/prometheus-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 9090
## Monitoring Stack: Grafana
## templates/grafana-deployment.yaml
##
grafana:
  component: grafana
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-grafana
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Grafana service
  ## templates/grafana-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 3000
  plugins: []
  ## Grafana ingress
  ## templates/grafana-ingress.yaml
  ##
  ingress:
    enabled: false
    annotations:
      kubernetes.io/ingress.class: nginx
      # nginx.ingress.kubernetes.io/rewrite-target: /$1
      # ingress.kubernetes.io/force-ssl-redirect: "true"
      ingress.kubernetes.io/rewrite-target: /
    labels: {}
    tls: []
    ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
    # - secretName: ""
    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
    extraPaths: []
    ## Required if ingress is enabled
    hostname: ""
    protocol: http
    path: /grafana
    port: 80
## Components Stack: pulsar_manager
## templates/pulsar-manager.yaml
##
pulsar_manager:
  component: pulsar-manager
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-manager
    tag: v0.1.0
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 250Mi
      cpu: 0.1
  configData:
    REDIRECT_HOST: "http://127.0.0.1"
    REDIRECT_PORT: "9527"
    DRIVER_CLASS_NAME: org.postgresql.Driver
    URL: jdbc:postgresql://127.0.0.1:5432/pulsar_manager
    LOG_LEVEL: DEBUG
    ## If you enable authentication support
    # JWT_TOKEN: <token>
    # SECRET_KEY: data:base64,<secret key>
  ## Pulsar manager service
  ## templates/pulsar-manager-service.yaml
  ##
  service:
    type: LoadBalancer
    annotations: {}
    ports:
    - name: server
      port: 9527
  admin:
    user: pulsar
    password: pulsar
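
## A minimal usage sketch, assuming the Helm 2 CLI of this chart's era and a
## hypothetical chart path; an install with this values file could look like:
##   helm install --name pulsar --namespace pulsar ./pulsar --values ./pulsar/values.yaml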