This patch allows TLS to be enabled with an empty secretName for ingress controllers that are able to provide a default certificate. Fixes #5858, provides better defaults for the Ingress object and allows TLS to be enabled with an empty secretName. ### Motivation The current helm chart can create an Ingress with TLS, but it requires a secretName to be added. This is not an Ingress requirement and, in some cases, the ingress controller can provide a default certificate when the Ingress object does not declare one. ### Modifications Modifications include `values.yaml` and `dashboard-ingress.yaml` to address the issue.
467 lines
12 KiB
YAML
467 lines
12 KiB
YAML
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

## Namespace to deploy pulsar
namespace: pulsar
namespaceCreate: true

## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims, otherwise, for test
## purposes, they will be deployed with emptyDir
persistence: false

## If prometheus_persistence is enabled, prometheus will be deployed
## with PersistentVolumeClaims, otherwise, for test purposes, they
## will be deployed with emptyDir
prometheus_persistence: true

prometheus_rbac: true

## which extra components to deploy
extra:
  # Pulsar proxy
  proxy: true
  # Bookkeeper auto-recovery
  autoRecovery: true
  # Pulsar dashboard
  # Deprecated
  # Replace pulsar-dashboard with pulsar-manager
  dashboard: false
  # pulsar manager
  pulsar_manager: true
  # Bastion pod for administrative commands
  bastion: true
  # Monitoring stack (prometheus and grafana)
  monitoring: true

## Which pulsar image to use
image:
  repository: apachepulsar/pulsar-all
  tag: latest
  pullPolicy: IfNotPresent

## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  component: zookeeper
  replicaCount: 3
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  # NOTE(review): this key is misspelled ("tolarations", not "tolerations") but
  # is kept as-is here and below — the chart templates look it up by this exact
  # name, so renaming it in values alone would silently drop tolerations.
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    data:
      name: data
      size: 20Gi
      ## If the storage class is left undefined when using persistence
      ## the default storage class for the cluster will be used.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations:
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    ports:
      - name: server
        port: 2888
      - name: leader-election
        port: 3888
      - name: stats
        port: 2181
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depends on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc will only start to be
## deployed when the zookeeper cluster is ready and with the
## metadata deployed
zookeeperMetadata:
  component: zookeeper-metadata

## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  component: bookkeeper
  replicaCount: 4
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    journal:
      name: journal
      size: 50Gi
      ## If the storage class is left undefined when using persistence
      ## the default storage class for the cluster will be used.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
    ledgers:
      name: ledgers
      size: 50Gi
      ## If the storage class is left undefined when using persistence
      ## the default storage class for the cluster will be used.
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -verbosegc -XX:G1LogLevel=finest\""
    PULSAR_PREFIX_dbStorage_writeCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_readAheadCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_rocksDB_blockCacheSize: "268435456"
    PULSAR_PREFIX_journalMaxSizeMB: "2048"
    PULSAR_PREFIX_statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
    PULSAR_PREFIX_useHostNameAsBookieID: "true"
  ## Bookkeeper configmap
  ## templates/bookkeeper-service.yaml
  ##
  service:
    annotations:
      publishNotReadyAddresses: "true"
    ports:
      - name: server
        port: 3181
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
broker:
  component: broker
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    PULSAR_PREFIX_managedLedgerDefaultEnsembleSize: "3"
    PULSAR_PREFIX_managedLedgerDefaultWriteQuorum: "3"
    PULSAR_PREFIX_managedLedgerDefaultAckQuorum: "2"
    PULSAR_PREFIX_deduplicationEnabled: "false"
    PULSAR_PREFIX_exposeTopicLevelMetricsInPrometheus: "true"
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: http
        port: 8080
      - name: pulsar
        port: 6650
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar Extra: Proxy
## templates/proxy-deployment.yaml
##
proxy:
  component: proxy
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Proxy configmap
  ## templates/proxy-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms4g -Xmx4g -XX:MaxDirectMemorySize=4g\""
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  service:
    annotations: {}
    type: NodePort
    ports:
      - name: http
        port: 8080
        nodePort: 30001
        protocol: TCP
      - name: tcp
        port: 6650
        nodePort: 30002
        protocol: TCP
  ## Proxy PodDisruptionBudget
  ## templates/proxy-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar Extra: Bookkeeper auto-recovery
## templates/autorecovery-deployment.yaml
##
autoRecovery:
  component: autorecovery
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\" -Xms1g -Xmx1g \""

## Pulsar Extra: Dashboard
## templates/dashboard-deployment.yaml
## Deprecated
##
dashboard:
  component: dashboard
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-dashboard
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Dashboard service
  ## templates/dashboard-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 80
  ingress:
    enabled: false
    annotations: {}
    tls:
      enabled: false

      ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
      secretName: ""

    ## Required if ingress is enabled
    hostname: ""
    path: "/"
    port: 80

## Pulsar Extra: Bastion
## templates/bastion-deployment.yaml
##
bastion:
  component: bastion
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bastion configmap
  ## templates/bastion-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g\""

## Monitoring Stack: Prometheus
## templates/prometheus-deployment.yaml
##
prometheus:
  component: prometheus
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  image:
    repository: prom/prometheus
    tag: v1.6.3
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  volumes:
    data:
      name: data
      size: 50Gi
      ## If the storage class is left undefined when using persistence
      ## the default storage class for the cluster will be used.
      ##
      # storageClass:
      #   type: pd-standard
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Prometheus service
  ## templates/prometheus-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 9090

## Monitoring Stack: Grafana
## templates/grafana-deployment.yaml
##
grafana:
  component: grafana
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-grafana
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Grafana service
  ## templates/grafana-service.yaml
  ##
  service:
    annotations: {}
    ports:
      - name: server
        port: 3000

## Components Stack: pulsar_manager
## templates/pulsar-manager.yaml
##
pulsar_manager:
  component: pulsar-manager
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolarations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-manager
    tag: v0.1.0
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 250Mi
      cpu: 0.1
  ## Pulsar manager service
  ## templates/pulsar-manager-service.yaml
  ##
  service:
    type: LoadBalancer
    annotations: {}
    ports:
      - name: server
        port: 9527
  # NOTE(review): default credentials committed to VCS — override these when
  # installing the chart rather than relying on the defaults.
  admin:
    user: pulsar
    password: pulsar