#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

###
### K8S Settings
###

### Namespace to deploy Pulsar
### Note: Prefer using helm's --namespace flag with --create-namespace instead
## The namespace to use to deploy the Pulsar components. If left empty,
## it will default to .Release.Namespace (aka helm --namespace).
## Please note that kube-prometheus-stack will not be able to scrape Pulsar component metrics by default unless
## it is deployed in the same namespace as Pulsar. The kube-prometheus-stack namespace can be configured by setting
## the kube-prometheus-stack.namespaceOverride key to match Pulsar's namespace.
## More details are provided in the comments for the kube-prometheus-stack.namespaceOverride key later in this file.
namespace: ""
namespaceCreate: false
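## For reference, a minimal install command that relies on helm's namespace handling
## (illustrative sketch only; the release name, chart reference and namespace are placeholders):
##
##   helm install pulsar apache/pulsar --namespace pulsar --create-namespace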

## clusterDomain as defined for your k8s cluster
clusterDomain: cluster.local

###
### Global Settings
###

## Set to true on install
## There's no need to set this value unless you're using a system that doesn't track .Release.IsInstall or .Release.IsUpgrade (like argocd)
initialize: false
## Set useReleaseStatus to false if you're deploying this chart using a system that doesn't track .Release.IsInstall or .Release.IsUpgrade (like argocd)
useReleaseStatus: true
## Set cluster name
# clusterName:

## add custom labels to components of the cluster
# labels:
#   environment: dev
#   customer: apache

## Pulsar Metadata Prefix
##
## By default, pulsar stores all the metadata at the root path.
## You can configure a prefix (e.g. "/my-pulsar-cluster").
## If you do so, all the pulsar and bookkeeper metadata will
## be stored under the provided path
metadataPrefix: ""

## Port name prefix
##
## Used for Istio support which depends on a standard naming of ports
## See https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#explicit-protocol-selection
## Prefixes are disabled by default

tcpPrefix: "" # For Istio this will be "tcp-"
tlsPrefix: "" # For Istio this will be "tls-"

## Persistence
##
## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims, otherwise, for test
## purposes, they will be deployed with emptyDir
##
## This is a global setting that is applied to all components.
## If you need to disable persistence for a component,
## you can set the `volume.persistence` setting to `false` for
## that component.
##
## Deprecated in favor of using `volumes.persistence`
persistence: true
## Volume settings
volumes:
  persistence: true
  # configure the components to use local persistent volumes
  # the local provisioner should be installed before enabling local persistent volumes
  local_storage: false

## RBAC
##
## Configure settings related to RBAC such as limiting broker access to a single
## namespace or enabling PSP

rbac:
  enabled: false
  psp: false # DEPRECATED: PodSecurityPolicy is not supported in Kubernetes 1.25+
  limit_to_namespace: true

## AntiAffinity
##
## Flag to enable and disable `AntiAffinity` for all components.
## This is a global setting that is applied to all components.
## If you need to disable AntiAffinity for a component, you can set
## the `affinity.anti_affinity` setting to `false` for that component.
affinity:
  ## When set to true, the scheduler will try to spread pods across different nodes.
  ## It is necessary to set this to false if you're using a Kubernetes cluster with fewer than 3 nodes, such as local development environments.
  anti_affinity: true
  # Set the anti affinity type. Valid values:
  # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
  # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
  type: requiredDuringSchedulingIgnoredDuringExecution
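  ## For a local development cluster with fewer than 3 nodes you could either disable
  ## anti-affinity or relax it to a soft preference (illustrative values, not the defaults):
  # anti_affinity: false
  ## or:
  # type: preferredDuringSchedulingIgnoredDuringExecution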

## Components
##
## Control what components of Apache Pulsar to deploy for the cluster
components:
  # zookeeper
  zookeeper: true
  # oxia
  oxia: false
  # bookkeeper
  bookkeeper: true
  # bookkeeper - autorecovery
  autorecovery: true
  # broker
  broker: true
  # functions
  # WARNING! Before enabling functions, make sure that all of your users are trusted since functions run user code
  # and the current security sandbox is not sufficient to protect against malicious code.
  functions: false
  # proxy
  proxy: true
  # toolset
  toolset: true
  # pulsar manager
  pulsar_manager: false

# default image repository for pulsar images
defaultPulsarImageRepository: apachepulsar/pulsar-all

# default image tag for pulsar images
# uses chart's appVersion when unspecified
defaultPulsarImageTag:

# default pull policy for all images
defaultPullPolicy: IfNotPresent
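## Illustrative example of pinning all Pulsar images to a specific release (the tag value
## is a placeholder, not a recommendation):
# defaultPulsarImageRepository: apachepulsar/pulsar-all
# defaultPulsarImageTag: "<pulsar-version>"
# defaultPullPolicy: IfNotPresent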

## Images
##
## Control what images to use for each component
images:
  # set imagePullSecrets
  # imagePullSecrets:
  #   - secretName
  zookeeper:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  bookie:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  autorecovery:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  broker:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  toolset:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  proxy:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  functions:
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  pulsar_manager:
    repository: apachepulsar/pulsar-manager
    tag: v0.4.0
    # uses defaultPullPolicy when unspecified
    pullPolicy:
    hasCommand: false
  oxia:
    repository: streamnative/oxia
    tag: 0.11.9
    pullPolicy: Always

## TLS
## templates/tls-certs.yaml
##
## The chart uses cert-manager to provision TLS certs for
## brokers and proxies.
tls:
  enabled: false
  ca_suffix: ca-tls
  # common settings for generating certs
  common:
    # 90d
    duration: 2160h
    # 15d
    renewBefore: 360h
    organization:
      - pulsar
    keySize: 4096
    keyAlgorithm: RSA
    keyEncoding: PKCS8
  # settings for generating certs for proxy
  proxy:
    enabled: false
    cert_name: tls-proxy
    # set to false if you want to use an existing certificate
    createCert: true
    # The dnsNames field specifies a list of Subject Alternative Names to be associated with the certificate.
    dnsNames:
    # - example.com
  # settings for generating certs for broker
  broker:
    enabled: false
    cert_name: tls-broker
    # The dnsNames field specifies a list of Subject Alternative Names to be associated with the certificate.
    dnsNames:
    # - example.com
  # settings for generating certs for bookies
  bookie:
    enabled: false
    cert_name: tls-bookie
  # settings for generating certs for zookeeper
  zookeeper:
    enabled: false
    cert_name: tls-zookeeper
  # settings for generating certs for recovery
  autorecovery:
    cert_name: tls-recovery
  # settings for generating certs for toolset
  toolset:
    cert_name: tls-toolset
  # TLS setting for function runtime instances
  function_instance:
    # controls the use of TLS for function runtime connections towards brokers
    enabled: false
  oxia:
    enabled: false

# Enable or disable broker authentication and authorization.
auth:
  authentication:
    enabled: false
    provider: "jwt"
    jwt:
      # Enable JWT authentication
      # If the token is generated by a secret key, set usingSecretKey to true.
      # If the token is generated by a private key, set usingSecretKey to false.
      usingSecretKey: false
  authorization:
    enabled: false
  superUsers:
    # broker to broker communication
    broker: "broker-admin"
    # proxy to broker communication
    proxy: "proxy-admin"
    # pulsar-admin client to broker/proxy communication
    client: "admin"
    # pulsar-manager to broker communication. If left empty, no jwt setup will be performed in the manager
    manager: ""
  # omits the above proxy role from superusers on the proxy
  # and configures it as a proxy role on the broker in addition to the superusers
  useProxyRoles: true
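  ## Illustrative sketch (not the defaults): enable JWT-based authentication and authorization
  ## with tokens signed by an asymmetric key pair:
  # authentication:
  #   enabled: true
  #   provider: "jwt"
  #   jwt:
  #     usingSecretKey: false
  # authorization:
  #   enabled: true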

######################################################################
# External dependencies
######################################################################

## cert-manager
## templates/tls-cert-issuer.yaml
##
## Cert manager is used for automatically provisioning TLS certificates
## for components within a Pulsar cluster
certs:
  internal_issuer:
    apiVersion: cert-manager.io/v1
    enabled: false
    component: internal-cert-issuer
    # The type of issuer, supports selfsigning and ca
    type: selfsigning
    # 90d
    duration: 2160h
    # 15d
    renewBefore: 360h
  issuers:
    # Used for certs.type selfsigning; the selfsigned issuer has no dependency on any other resource.
    selfsigning:
    # Used for certs.type ca; the CA issuer needs to reference a Secret which contains your CA certificate and signing private key.
    ca:
      secretName:
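      ## cert-manager's CA issuer reads the CA certificate and signing key from the
      ## `tls.crt` and `tls.key` keys of the referenced Secret. A sketch of creating such a
      ## secret (illustrative; the secret name and file names are placeholders):
      ## ```
      ## kubectl -n pulsar create secret tls my-ca-key-pair \
      ##   --cert=ca.crt --key=ca.key
      ## ```
      # secretName: my-ca-key-pair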

######################################################################
# Below are settings for each component
######################################################################

## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in the grafana dashboard
  component: zookeeper
  # The number of zookeeper servers to run. It should be an odd number larger than or equal to 3.
  replicaCount: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  initContainers: []
  # This is how prometheus discovers this component
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
    # - action: labeldrop
    #   regex: cluster
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
    client: 2181
    clientTls: 2281
    follower: 2888
    leaderElection: 3888
    # admin: 9990
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
  affinity:
    anti_affinity: true
    anti_affinity_topology_key: kubernetes.io/hostname
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
    type: requiredDuringSchedulingIgnoredDuringExecution
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
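  ## Illustrative example of spreading zookeeper pods across zones (the label selector
  ## shown is an assumption and must match the labels this chart applies to the pods):
  # topologySpreadConstraints:
  #   - maxSkew: 1
  #     topologyKey: topology.kubernetes.io/zone
  #     whenUnsatisfiable: DoNotSchedule
  #     labelSelector:
  #       matchLabels:
  #         component: zookeeper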
  annotations: {}
  tolerations: []
  gracePeriod: 30
  resources:
    requests:
      memory: 256Mi
      cpu: 0.1
  # extraVolumes and extraVolumeMounts allow you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  # Ensures 2.10.0 non-root docker image works correctly.
  securityContext:
    fsGroup: 0
    fsGroupChangePolicy: "OnRootMismatch"
  volumes:
    useSeparateDiskForTxlog: false
    # use a persistent volume or emptyDir
    persistence: true
    data:
      name: data
      size: 20Gi
      local_storage: true
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below
      ## If left undefined no storage class will be defined along with the PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
      ## If you want to bind static persistent volumes via selectors, e.g.:
      # selector:
      #   matchLabels:
      #     app: pulsar-zookeeper
      selector: {}
    ## If you set useSeparateDiskForTxlog to true, this section configures the extra volume for the zookeeper transaction log.
    datalog:
      name: datalog
      size: 20Gi
      local_storage: true
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below
      ## If left undefined no storage class will be defined along with the PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
      ## If you want to bind static persistent volumes via selectors, e.g.:
      # selector:
      #   matchLabels:
      #     app: pulsar-zookeeper
      selector: {}
  # External zookeeper server list, used to create a global zookeeper cluster across zookeeper nodes deployed in different clusters/namespaces
  # Example value: "us-east1-pulsar-zookeeper-0.us-east1-pulsar-zookeeper.us-east1.svc.cluster.local:2888:3888,us-east1-pulsar-zookeeper-1.us-east1-pulsar-zookeeper.us-east1.svc.cluster.local:2888:3888,us-east1-pulsar-zookeeper-2.us-east1-pulsar-zookeeper.us-east1.svc.cluster.local:2888:3888,us-west1-pulsar-zookeeper-0.us-west1-pulsar-zookeeper.us-west1.svc.cluster.local:2888:3888,us-west1-pulsar-zookeeper-1.us-west1-pulsar-zookeeper.us-west1.svc.cluster.local:2888:3888,us-west1-pulsar-zookeeper-2.us-west1-pulsar-zookeeper.us-west1.svc.cluster.local:2888:3888"
  externalZookeeperServerList: ""
  ## Zookeeper service account
  ## templates/zookeeper-service-account.yaml
  service_account:
    annotations: {}
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: >
      -Xms64m -Xmx128m
    PULSAR_GC: >
      -XX:+UseG1GC
      -XX:MaxGCPauseMillis=10
      -Dcom.sun.management.jmxremote
      -Djute.maxbuffer=10485760
      -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis
      -XX:+DisableExplicitGC
      -XX:+ExitOnOutOfMemoryError
      -XX:+PerfDisableSharedMem
  ## Add a custom command to the start up process of the zookeeper pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations: {}
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar: Oxia cluster
oxia:
  component: oxia
  initialShardCount: 3
  replicationFactor: 3
  ## templates/coordinator-deployment.yaml
  coordinator:
    # This is how prometheus discovers this component
    podMonitor:
      enabled: true
      interval: 60s
      scrapeTimeout: 60s
      metricRelabelings:
      # - action: labeldrop
      #   regex: cluster
    cpuLimit: 100m
    memoryLimit: 128Mi
    ports:
      internal: 6649
      metrics: 8080
    service:
      annotations: {}
    service_account:
      annotations: {}
    tolerations: []
    # nodeSelector:
    #   cloud.google.com/gke-nodepool: default-pool
  ## templates/server-statefulset.yaml
  server:
    # This is how prometheus discovers this component
    podMonitor:
      enabled: true
      interval: 60s
      scrapeTimeout: 60s
      metricRelabelings:
      # - action: labeldrop
      #   regex: cluster
    replicas: 3
    # CPU limit for the server pod
    cpuLimit: 1
    # Memory limit for the server pod
    memoryLimit: 1Gi
    # Oxia database cache size in MB
    dbCacheSizeMb: 512
    # Storage size for the PVC of the server pod
    storageSize: 8Gi
    # Storage class name for the PVC of the server pod
    # storageClassName: existent-storage-class
    ports:
      public: 6648
      internal: 6649
      metrics: 8080
    service:
      public:
        annotations: {}
      internal:
        annotations: {}
    service_account:
      annotations: {}
    securityContext:
      fsGroup: 0
      fsGroupChangePolicy: "OnRootMismatch"
    affinity:
      anti_affinity: true
      anti_affinity_topology_key: kubernetes.io/hostname
      # Set the anti affinity type. Valid values:
      # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
      # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
      type: requiredDuringSchedulingIgnoredDuringExecution
    # set topologySpreadConstraints to deploy pods across different zones
    topologySpreadConstraints: []
    tolerations: []
    # nodeSelector:
    #   cloud.google.com/gke-nodepool: default-pool
  pprofEnabled: false
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in the grafana dashboard
  component: bookie
  ## BookKeeper Cluster Initialize
  ## templates/bookkeeper-cluster-initialize.yaml
  metadata:
    ## Timeout for waiting for zookeeper to become available before running metadata initialization
    waitZookeeperTimeout: 600
    ## Timeout for running metadata initialization
    initTimeout: 60
    ## Timeout for waiting for oxia to be available before running metadata initialization. This setting applies only when oxia is enabled.
    waitOxiaTimeout: 600
    ## Set the resources used for running `bin/bookkeeper shell initnewcluster`
    ##
    resources:
    # requests:
    #   memory: 4Gi
    #   cpu: 2
  replicaCount: 4
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  initContainers: []
  # This is how prometheus discovers this component
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
    # - action: labeldrop
    #   regex: cluster
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
    bookie: 3181
    statestore: 4181
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 60
      initialDelaySeconds: 10
      periodSeconds: 30
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 60
      initialDelaySeconds: 10
      periodSeconds: 30
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 30
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    anti_affinity_topology_key: kubernetes.io/hostname
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
    type: requiredDuringSchedulingIgnoredDuringExecution
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  tolerations: []
  gracePeriod: 30
  ## Timeout for waiting for bookkeeper cluster metadata to be initialized before starting a bookie
  waitMetadataTimeout: 600
  resources:
    requests:
      memory: 512Mi
      cpu: 0.2
  # extraVolumes and extraVolumeMounts allow you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  # Ensures 2.10.0 non-root docker image works correctly.
  securityContext:
    fsGroup: 0
    fsGroupChangePolicy: "OnRootMismatch"
  volumes:
    # use a persistent volume or emptyDir
    persistence: true
    journal:
      name: journal
      size: 10Gi
      local_storage: true
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead, if you want to create a new storage class, define it below
      ## If left undefined no storage class will be defined along with the PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
      ## If you want to bind static persistent volumes via selectors, e.g.:
      # selector:
      #   matchLabels:
      #     app: pulsar-bookkeeper-journal
      selector: {}
      useMultiVolumes: false
      multiVolumes:
        - name: journal0
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/journal0
        - name: journal1
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/journal1
    ledgers:
      name: ledgers
      size: 50Gi
      local_storage: true
      # storageClassName:
      # storageClass:
      #   ...
      # selector:
      #   ...
      useMultiVolumes: false
      multiVolumes:
        - name: ledgers0
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/ledgers0
        - name: ledgers1
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/ledgers1

    ## use a single common volume for both journal and ledgers
    useSingleCommonVolume: false
    common:
      name: common
      size: 60Gi
      local_storage: true
      # storageClassName:
      # storageClass: ## this is common too
      #   ...
      # selector:
      #   ...
  ## Bookkeeper service account
  ## templates/bookkeeper-service-account.yaml
  service_account:
    annotations: {}
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    # we use `bin/pulsar` for starting bookie daemons
    PULSAR_MEM: >
      -Xms128m
      -Xmx256m
      -XX:MaxDirectMemorySize=256m
    PULSAR_GC: >
      -XX:+UseG1GC
      -XX:MaxGCPauseMillis=10
      -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis
      -XX:ParallelGCThreads=4
      -XX:ConcGCThreads=4
      -XX:G1NewSizePercent=50
      -XX:+DisableExplicitGC
      -XX:-ResizePLAB
      -XX:+ExitOnOutOfMemoryError
      -XX:+PerfDisableSharedMem
    #
    # Bookkeeper configuration reference: https://bookkeeper.apache.org/docs/reference/config
    #
    # https://bookkeeper.apache.org/docs/reference/config#db-ledger-storage-settings
    # You could use the below example settings for a minimal configuration
    # dbStorage_writeCacheMaxSizeMb: "32"
    # dbStorage_readAheadCacheMaxSizeMb: "32"
    # dbStorage_rocksDB_writeBufferSizeMB: "8"
    # dbStorage_rocksDB_blockCacheSize: "8388608"
    #
    # configure the data compaction (bookie entry log compaction and gc) settings
    # https://bookkeeper.apache.org/docs/reference/config#garbage-collection-settings
    # https://bookkeeper.apache.org/docs/reference/config#entry-log-compaction-settings
    minorCompactionThreshold: "0.2" # default 0.2 (use default)
    minorCompactionInterval: "360" # default 3600 seconds (6 minutes vs default 1 hour)
    majorCompactionThreshold: "0.8" # default 0.5
    majorCompactionInterval: "10800" # default 86400 seconds (3 hours vs default 1 day)
    gcWaitTime: "300000" # default 900000 milliseconds (5 minutes vs default 15 minutes)
    isForceGCAllowWhenNoSpace: "true" # default false
    # disk utilization configuration
    # https://bookkeeper.apache.org/docs/reference/config#disk-utilization
    # Make sure that diskUsageLwmThreshold <= diskUsageWarnThreshold <= diskUsageThreshold
    diskUsageLwmThreshold: "0.85" # default 0.90
    diskUsageWarnThreshold: "0.9" # default 0.95
    diskUsageThreshold: "0.95" # default 0.95 (use default)
    diskCheckInterval: "1800" # default 10000

  ## Add a custom command to the start up process of the bookie pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Bookkeeper Service
  ## templates/bookkeeper-service.yaml
  ##
  service:
    spec:
      publishNotReadyAddresses: true
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar: Bookkeeper AutoRecovery
## templates/autorecovery-statefulset.yaml
##
autorecovery:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in the grafana dashboard
  component: recovery
  replicaCount: 1
  initContainers: []
  # This is how prometheus discovers this component
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
    # - action: labeldrop
    #   regex: cluster
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  affinity:
    anti_affinity: true
    anti_affinity_topology_key: kubernetes.io/hostname
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
    type: requiredDuringSchedulingIgnoredDuringExecution
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  # tolerations: []
  gracePeriod: 30
  ## Timeout for waiting for bookkeeper to become available before starting autorecovery
  waitBookkeeperTimeout: 120
  resources:
    requests:
      memory: 64Mi
      cpu: 0.05
  ## Bookkeeper auto-recovery service
  ## templates/autorecovery-service.yaml
  service:
    annotations: {}
  ## Bookkeeper auto-recovery service account
  ## templates/autorecovery-service-account.yaml
  service_account:
    annotations: {}
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    BOOKIE_MEM: >
      -Xms64m -Xmx64m
    PULSAR_PREFIX_useV2WireProtocol: "true"

## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depend on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc., will only start once
## the zookeeper cluster is ready and the metadata has been
## deployed
pulsar_metadata:
  component: pulsar-init
  image:
    # the image used for running the `pulsar-cluster-initialize` job
    # uses defaultPulsarImageRepository when unspecified
    repository:
    # uses defaultPulsarImageTag when unspecified
    tag:
    # uses defaultPullPolicy when unspecified
    pullPolicy:
  ## set an existing configuration store. This setting applies only when zookeeper is enabled.
  # configurationStore:
  # the prefix for the configuration store metadata. This setting applies only when zookeeper is enabled.
  configurationStoreMetadataPrefix: ""
  # the configuration store port. This setting applies only when zookeeper is enabled.
  configurationStorePort: 2181
  # the zookeeper timeout. This setting applies only when zookeeper is enabled.
  waitZookeeperTimeout: 600
  ## Timeout for waiting for oxia to be available before running metadata initialization. This setting applies only when oxia is enabled.
  waitOxiaTimeout: 600
  ## Timeout for waiting for bookkeeper to be initialized before running metadata initialization
  waitBookkeeperTimeout: 120
  ## Timeout for running metadata initialization
  initTimeout: 60

  ## Allow read-only operations on the metadata store when the metadata store is not available.
  ## This is useful when you want to continue serving requests even if the metadata store is not fully available with quorum.
  metadataStoreAllowReadOnlyOperations: false

  ## The session timeout for the metadata store in milliseconds.
  metadataStoreSessionTimeoutMillis: 30000

  ## Metadata store operation timeout in seconds.
  metadataStoreOperationTimeoutSeconds: 30

  ## The expiry time for the metadata store cache in seconds.
  metadataStoreCacheExpirySeconds: 300

  ## Whether we should enable metadata operations batching
  metadataStoreBatchingEnabled: true

  ## Maximum delay to impose on batching grouping (in milliseconds)
  metadataStoreBatchingMaxDelayMillis: 5

  ## Maximum number of operations to include in a singular batch
  metadataStoreBatchingMaxOperations: 1000

  ## Maximum size of a batch (in KB)
  metadataStoreBatchingMaxSizeKb: 128

  ## BookKeeper client and BookKeeper metadata configuration settings with Pulsar Helm Chart deployments
  bookkeeper:
    ## Controls whether to use the PIP-45 metadata driver (PulsarMetadataClientDriver) for the BookKeeper client
    ## in the Pulsar Broker when using ZooKeeper as a metadata store.
    ## This setting applies to the Pulsar Broker's BookKeeper client.
    ## When set to true, the Pulsar Broker's BookKeeper client will use the PIP-45 metadata driver (PulsarMetadataClientDriver).
    ## When set to false, the Pulsar Broker's BookKeeper client will use BookKeeper's default ZooKeeper connection implementation.
    usePulsarMetadataClientDriver: false

    ## Controls whether to use the PIP-45 metadata driver (PulsarMetadataBookieDriver) for BookKeeper components
    ## when using ZooKeeper as a metadata store.
    ## This is a global setting that applies to all BookKeeper components.
    ## When set to true, BookKeeper components will use the PIP-45 metadata driver (PulsarMetadataBookieDriver).
    ## When set to false, BookKeeper components will use BookKeeper's default ZooKeeper connection implementation.
    ## Warning: Do not enable this feature unless you are aware of the risks and have tested it in non-production environments.
    usePulsarMetadataBookieDriver: false

    ## The session timeout for the metadata store in milliseconds. This setting is mapped to `zkTimeout` in `bookkeeper.conf`.
    ## Due to implementation details in the PulsarMetadataBookieDriver, it also applies when the Oxia metadata store is enabled.
    metadataStoreSessionTimeoutMillis: 30000

  # resources for bin/pulsar initialize-cluster-metadata
  resources:
  # requests:
  #   memory: 512Mi
  #   cpu: 1

  ## Optionally, you can specify tolerations and nodeSelectors for all init jobs (pulsar-init & bookkeeper-init)
  # tolerations: []
  #   - key: "someKey"
  #     operator: "Equal"
  #     value: "someValue"
  #     effect: "NoSchedule"
  # nodeSelector: {}
  #   cloud.google.com/gke-nodepool: default-pool

  ## Optionally, you can provide your own zookeeper metadata store for other components.
  # To use this, you should explicitly set components.zookeeper to false.
  #
  # userProvidedZookeepers: "zk01.example.com:2181,zk02.example.com:2181"

  # Can be used to run extra commands in the initialization jobs, e.g. to quit istio sidecars etc.
  extraInitCommand: ""

## Pulsar: Broker cluster
## templates/broker-statefulset.yaml
##
broker:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in the grafana dashboard
  component: broker
  replicaCount: 3
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    metrics: ~
    behavior: ~
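    ## Illustrative sketch of autoscaling on CPU, assuming these values are passed through
    ## to an autoscaling/v2 HorizontalPodAutoscaler spec (not the chart defaults):
    # enabled: true
    # minReplicas: 3
    # maxReplicas: 5
    # metrics:
    #   - type: Resource
    #     resource:
    #       name: cpu
    #       target:
    #         type: Utilization
    #         averageUtilization: 80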
  # The podManagementPolicy cannot be modified for an existing deployment. If you need to change this value, you will need to manually delete the existing broker StatefulSet and then redeploy the chart.
  podManagementPolicy:
  initContainers: []
  # This is how prometheus discovers this component
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
    # - action: labeldrop
    #   regex: cluster
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  ports:
    http: 8080
    https: 8443
    pulsar: 6650
    pulsarssl: 6651
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 10
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    anti_affinity_topology_key: kubernetes.io/hostname
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
    type: preferredDuringSchedulingIgnoredDuringExecution
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  tolerations: []
  gracePeriod: 30
  ## Timeout for waiting for zookeeper to become available before starting a broker
  waitZookeeperTimeout: 600
  ## Timeout for waiting for oxia to be available before starting a broker. This setting applies only when oxia is enabled.
  waitOxiaTimeout: 600
  ## Timeout for waiting for bookkeeper to become available before starting a broker
  waitBookkeeperTimeout: 120
  resources:
    requests:
      memory: 512Mi
      cpu: 0.2
  # extraVolumes and extraVolumeMounts allow you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  extraEnvs: []
  # - name: POD_NAME
  #   valueFrom:
  #     fieldRef:
  #       apiVersion: v1
  #       fieldPath: metadata.name
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ## Keys in broker.conf can be overridden here. Use PULSAR_PREFIX_ to add keys to broker.conf.
  ## In addition, keys in functions_worker.yml can be overridden using the PF_ prefix, with _ serving as the key path separator.
  ##
  configData:
    PULSAR_MEM: >
      -Xms128m -Xmx256m -XX:MaxDirectMemorySize=256m
    PULSAR_GC: >
      -XX:+UseG1GC
      -XX:MaxGCPauseMillis=10
      -Dio.netty.leakDetectionLevel=disabled
      -Dio.netty.recycler.linkCapacity=1024
      -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis
      -XX:ParallelGCThreads=4
      -XX:ConcGCThreads=4
      -XX:G1NewSizePercent=50
      -XX:+DisableExplicitGC
      -XX:-ResizePLAB
      -XX:+ExitOnOutOfMemoryError
      -XX:+PerfDisableSharedMem
    managedLedgerDefaultEnsembleSize: "2"
    managedLedgerDefaultWriteQuorum: "2"
    managedLedgerDefaultAckQuorum: "2"
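    ## Illustrative overrides (not defaults) showing the prefix conventions described above:
    ## a broker.conf key added via PULSAR_PREFIX_ and a functions worker key via PF_.
    ## The specific keys are examples only.
    # PULSAR_PREFIX_brokerDeleteInactiveTopicsEnabled: "false"
    # PF_pulsarFunctionsCluster: "pulsar"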

  ## Add a custom command to the start up process of the broker pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    # clusterIP can be one of three values, which determines the type of k8s service deployed for the broker
    # 1. a valid IPv4 address -> non-headless service, lets you select the IPv4 address
    # 2. '' -> non-headless service, k8s picks an IPv4 address
    # 3. 'None' -> headless
    # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip
    clusterIP: "None"
    annotations: {}
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1
  ### Broker service account
  ## templates/broker-service-account.yaml
  service_account:
    annotations: {}
    ## You may use the following annotation in order to use EKS IAM Roles for Service Accounts (IRSA)
    # eks.amazonaws.com/role-arn: arn:aws:iam::66666:role/my-iam-role-with-s3-access
  ## Tiered Storage
  ##
  storageOffload: {}
    ## General
    ## =======
    # maxBlockSizeInBytes: "64000000"
    # readBufferSizeInBytes: "1000000"
    ## The following are default values for the cluster. They can be changed
    ## on each namespace.
    # managedLedgerOffloadDeletionLagMs: "14400000"
    # managedLedgerOffloadAutoTriggerSizeThresholdBytes: "-1" # disabled

    ## For AWS S3
    ## ======
    ## Either you must create an IAM account with access to the bucket and
    ## generate keys for that account, or use IAM Roles for Service Accounts (IRSA)
    ## (example in the `.Values.broker.service_account.annotations` section above)
    ##
    # driver: aws-s3
    # bucket: <bucket>
    # region: <region>
    ## Secret that stores AWS credentials, created using the following command:
    ## ```
    ## kubectl -n pulsar create secret generic \
    ##   --from-literal=AWS_ACCESS_KEY_ID=<AWS ACCESS KEY> \
    ##   --from-literal=AWS_SECRET_ACCESS_KEY=<AWS SECRET KEY> \
    ##   <secret name>
    ## ```
    # secret: <secret name> # [k8s secret name that stores AWS credentials]

    ## For S3 Compatible
    ## =================
    ## Need to create an access key and secret key for the S3 compatible service
    #
    # driver: aws-s3
    # bucket: <bucket>
    # region: <region>
    # serviceEndpoint: host:port
    ## Secret that stores AWS credentials, created using the following command:
    ## ```
    ## kubectl -n pulsar create secret generic \
    ##   --from-literal=AWS_ACCESS_KEY_ID=<AWS ACCESS KEY> \
    ##   --from-literal=AWS_SECRET_ACCESS_KEY=<AWS SECRET KEY> \
    ##   <aws secret name>
    ## ```
    # secret: <aws secret name> # [k8s secret name that stores AWS credentials]

    ## For Azure Blob
    ## =================
    ## Need to create an Azure storage account and a blob container (bucket)
    ## To retrieve the key, see https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#code-try-1
    #
    # driver: azureblob
    # bucket: <bucket>
    # region: <region>
    ## Secret that stores AZURE credentials, created using the following command:
    ## ```
    ## kubectl -n pulsar create secret generic \
    ##   --from-literal=AZURE_STORAGE_ACCOUNT=<AZURE STORAGE ACCOUNT> \
    ##   --from-literal=AZURE_STORAGE_ACCESS_KEY=<AZURE STORAGE ACCESS KEY> \
    ##   <azure secret name>
    ## ```
    # secret: <azure secret name> # [k8s secret name that stores AZURE credentials]

    ## For Google Cloud Storage
    ## ====================
    ## You must create a service account that has access to the objects in GCP buckets
    ## and upload its key as a JSON file to a secret.
    ##
    ## 1. Go to https://console.cloud.google.com/iam-admin/serviceaccounts
    ## 2. Select your project.
    ## 3. Create a new service account.
    ## 4. Give the service account permission to access the bucket. For example,
    ##    the "Storage Object Admin" role.
    ## 5. Create a key for the service account and save it as a JSON file.
    ## 6. Save the JSON file in a secret:
    ##    kubectl create secret generic pulsar-gcp-sa-secret \
    ##      --from-file=google-service-account-key.json \
    ##      --namespace pulsar
    ##
    # driver: google-cloud-storage
    # bucket: <bucket>
    # region: <region>
    # gcsServiceAccountSecret: pulsar-gcp-sa-secret # pragma: allowlist secret
    # gcsServiceAccountJsonFile: google-service-account-key.json

## Pulsar: Functions Worker
## The Function Worker component runs embedded with the broker
## Configuration for the function worker is set in the broker configmap with keys prefixed by `PF_`.
functions:
  component: functions-worker
  useBookieAsStateStore: false
  ## Pulsar: Functions Worker ClusterRole or Role
  ## templates/broker-rbac.yaml
  # Default is false, which deploys functions with a ClusterRole and ClusterRoleBinding at the cluster level
  # Set to true to deploy functions with a Role and RoleBinding inside the specified namespace
  rbac:
    limit_to_namespace: true
  ### Functions Worker service account
  ## templates/broker-service-account.yaml
  service_account:
    annotations: {}

## Pulsar: Proxy Cluster
## templates/proxy-statefulset.yaml
##
proxy:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in the grafana dashboard
  component: proxy
  replicaCount: 3
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    metrics: ~
    behavior: ~
  initContainers: []
  # This is how prometheus discovers this component
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
    # - action: labeldrop
    #   regex: cluster
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 10
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    anti_affinity_topology_key: kubernetes.io/hostname
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for the pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - the scheduler will try to enforce the rule but does not guarantee it
    type: requiredDuringSchedulingIgnoredDuringExecution
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  tolerations: []
  gracePeriod: 30
  ## Timeout for waiting for zookeeper to become available before starting a proxy
  waitZookeeperTimeout: 600
  ## Timeout for waiting for oxia to be available before starting a proxy. This setting applies only when oxia is enabled.
  waitOxiaTimeout: 600
  ## Timeout for waiting for brokers to become available before starting a proxy
  waitBrokerTimeout: 120
  resources:
    requests:
      memory: 128Mi
      cpu: 0.2
  # extraVolumes and extraVolumeMounts allow you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  extraEnvs: []
  # - name: POD_IP
  #   valueFrom:
  #     fieldRef:
  #       apiVersion: v1
  #       fieldPath: status.podIP
  ## Proxy service account
  ## templates/proxy-service-account.yaml
  service_account:
    annotations: {}
  ## Proxy configmap
  ## templates/proxy-configmap.yaml
  ##
  configData:
    PULSAR_MEM: >
      -Xms64m -Xmx64m -XX:MaxDirectMemorySize=64m
    PULSAR_GC: >
      -XX:+UseG1GC
      -XX:MaxGCPauseMillis=10
      -Dio.netty.leakDetectionLevel=disabled
      -Dio.netty.recycler.linkCapacity=1024
      -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis
      -XX:ParallelGCThreads=4
      -XX:ConcGCThreads=4
      -XX:G1NewSizePercent=50
      -XX:+DisableExplicitGC
      -XX:-ResizePLAB
      -XX:+ExitOnOutOfMemoryError
      -XX:+PerfDisableSharedMem
    httpNumThreads: "8"
  ## Add a custom command to the start up process of the proxy pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  ports:
    http: 80
    https: 443
    pulsar: 6650
    pulsarssl: 6651
  containerPorts:
    http: 8080
    https: 8443
  service:
    # Service type defaults to ClusterIP for security reasons.
    #
    # SECURITY NOTICE: The Pulsar proxy is not designed for direct public internet exposure
    # (see https://pulsar.apache.org/docs/4.0.x/administration-proxy/).
    #
    # If you need to expose the proxy outside of the cluster using a LoadBalancer service type:
    # 1. Set type to LoadBalancer only in secured environments with proper network controls.
    #    In cloud managed Kubernetes clusters, make sure to add annotations to the service to create an
    #    internal load balancer so that the load balancer is not exposed to the public internet.
    #    You must also ensure that the configuration is correct so that the load balancer is not exposed to the public internet.
    # 2. Configure authentication and authorization
    # 3. Use TLS for all connections
    # 4. If you are exposing to unsecured networks, implement additional security measures like
    #    IP restrictions (loadBalancerSourceRanges)
    #
    # Please note that the Apache Pulsar project takes no responsibility for any security issues
    # in your deployment. Exposing the cluster to unsecured networks via the Pulsar Proxy is not supported.
    #
    # Previous chart versions defaulted to LoadBalancer, which could create security risks.
    type: ClusterIP
    # When using a LoadBalancer service type, add internal load balancer annotations to the service to create an internal load balancer.
    annotations: {
      ## Set internal load balancer annotations when using a LoadBalancer service type for security reasons.
      ## You must also ensure that the configuration is correct so that the load balancer is not exposed to the public internet.
      ## The information below is for reference only and may not be applicable to your cloud provider.
      ## Please refer to the cloud provider's documentation for the correct annotations.
      ## Kubernetes documentation about internal load balancers:
      ## https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
      ## AWS / EKS
      ## Ensure that you have a recent AWS Load Balancer Controller installed.
      ## Docs: https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/
      # service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
      ## Azure / AKS
      ## Docs: https://learn.microsoft.com/en-us/azure/aks/internal-lb
      # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
      ## GCP / GKE
      ## Docs: https://cloud.google.com/kubernetes-engine/docs/concepts/service-load-balancer-parameters
      # networking.gke.io/load-balancer-type: "Internal"
      ## Allow global access to the internal load balancer when needed.
      # networking.gke.io/internal-load-balancer-allow-global-access: "true"
    }

    ## Optional. Leave it blank to have the load balancer IP assigned automatically.
    loadBalancerIP: ""
    ## Set external traffic policy to "Local" to preserve source IP on providers supporting it.
    ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
    # externalTrafficPolicy: Local
    ## Restrict traffic through the load balancer to specified IPs on providers supporting it.
    # loadBalancerSourceRanges:
    #   - 10.0.0.0/8
    # Set a loadBalancerClass for the loadbalancer service. (example: loadBalancerClass is needed by metallb)
    # loadBalancerClass: ""
    # Optional. When proxy.service.type is set to NodePort, nodePorts allows you to choose the port that will be opened on each node to proxy requests to each destination proxy service port.
    # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    nodePorts:
      http: ""
      https: ""
      pulsar: ""
      pulsarssl: ""
  ## Proxy ingress
  ## templates/proxy-ingress.yaml
  ##
  ingress:
    enabled: false
    annotations: {}
    ingressClassName: ""
    tls:
      enabled: false

      ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
      secretName: ""

    hostname: ""
    path: "/"
    pathType: ImplementationSpecific
  ## Proxy PodDisruptionBudget
  ## templates/proxy-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1

## Pulsar ToolSet
## templates/toolset-statefulset.yaml
##
toolset:
  component: toolset
  useProxy: true
  replicaCount: 1
  initContainers: []
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  tolerations: []
  gracePeriod: 30
  resources:
    requests:
      memory: 256Mi
      cpu: 0.1
  # extraVolumes and extraVolumeMounts allow you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  ## Toolset service account
  ## templates/toolset-service-account.yaml
  service_account:
    annotations: {}
  ## Toolset configmap
  ## templates/toolset-configmap.yaml
  ##
  configData:
    PULSAR_MEM: >
      -Xms64M
      -Xmx128M
      -XX:MaxDirectMemorySize=128M
  ## Add a custom command to the start up process of the toolset pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:

#############################################################
### Monitoring Stack : kube-prometheus-stack chart
#############################################################

## Prometheus, Grafana, and the rest of the kube-prometheus-stack are managed by the dependent chart here:
## https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
## For sample values, please see their documentation.
kube-prometheus-stack:
  ## Enable the kube-prometheus-stack chart
  enabled: true
  ## This applies to deployments which don't use helm's --namespace flag to set the namespace.
  ## If Pulsar's namespace is manually set using the `namespace` key, this setting should match the same namespace,
  ## otherwise Prometheus will not be able to scrape the Pulsar metrics due to RBAC restrictions.
  ## See https://prometheus-operator.dev/kube-prometheus/kube/monitoring-other-namespaces/ if you need to install
  ## kube-prometheus-stack in a different namespace than Pulsar.
  # namespaceOverride: ""
  ## Manages Prometheus and Alertmanager components
  prometheusOperator:
    enabled: true
  ## Prometheus component
  prometheus:
    enabled: true
  ## Grafana component
  grafana:
    enabled: true
    # Use a random password at installation time for Grafana by default by leaving `adminPassword` empty.
    # You can find out the actual password by running the following command:
    # kubectl get secret -l app.kubernetes.io/name=grafana -o=jsonpath="{.items[0].data.admin-password}" | base64 --decode
    adminPassword:
    # Configure Pulsar dashboards for Grafana
    dashboardProviders:
      dashboardproviders.yaml:
        apiVersion: 1
        providers:
          - name: 'pulsar'
            orgId: 1
            folder: 'Pulsar'
            type: file
            disableDeletion: true
            editable: true
            options:
              path: /var/lib/grafana/dashboards/pulsar
    dashboards:
      pulsar:
        # Download the maintained dashboards from the Apache License 2.0 licensed repo https://github.com/streamnative/apache-pulsar-grafana-dashboard
        bookkeeper:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/bookkeeper.json
          datasource: Prometheus
        broker:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/broker.json
          datasource: Prometheus
        connector_sink:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_sink.json
          datasource: Prometheus
        connector_source:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_source.json
          datasource: Prometheus
        container:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/container.json
          datasource: Prometheus
        functions:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/functions.json
          datasource: Prometheus
        jvm:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/jvm.json
          datasource: Prometheus
        loadbalance:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/loadbalance.json
          datasource: Prometheus
        messaging:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/messaging.json
          datasource: Prometheus
        node:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/node.json
          datasource: Prometheus
        overview:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/overview.json
          datasource: Prometheus
        proxy:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/proxy.json
          datasource: Prometheus
        recovery:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/recovery.json
          datasource: Prometheus
        topic:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/topic.json
          datasource: Prometheus
        transaction:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/transaction.json
          datasource: Prometheus
        zookeeper:
          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/zookeeper-3.6.json
          datasource: Prometheus
        offloader:
          url: https://raw.githubusercontent.com/apache/pulsar/refs/heads/master/grafana/dashboards/offloader.json
          datasource: Prometheus
        broker-cache:
          url: https://raw.githubusercontent.com/datastax/pulsar-helm-chart/refs/heads/master/helm-chart-sources/pulsar/grafana-dashboards/broker-cache-by-broker.json
          datasource: Prometheus
        sockets:
          url: https://raw.githubusercontent.com/datastax/pulsar-helm-chart/refs/heads/master/helm-chart-sources/pulsar/grafana-dashboards/sockets.json
          datasource: Prometheus
  ## Prometheus node exporter component
  prometheus-node-exporter:
    enabled: true
    hostRootFsMount:
      enabled: false
  ## Alertmanager component
  alertmanager:
    enabled: false

## Components Stack: pulsar_manager
## templates/pulsar-manager.yaml
##
pulsar_manager:
  component: pulsar-manager
  replicaCount: 1
  initContainers: []
  # When true, adds an annotation to the statefulset containing a hash of the corresponding configmap, which causes pods to restart when the configmap changes
  restartPodsOnConfigMapChange: false
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  # set topologySpreadConstraints to deploy pods across different zones
  topologySpreadConstraints: []
  annotations: {}
  tolerations: []
  extraVolumes: []
  extraVolumeMounts: []
  gracePeriod: 30
  resources:
    requests:
      memory: 250Mi
      cpu: 0.1
  configData:
    REDIRECT_HOST: "http://127.0.0.1"
    REDIRECT_PORT: "9527"
    LOG_LEVEL: "INFO"
    # DB
    URL: "jdbc:postgresql://127.0.0.1:5432/pulsar_manager"
    DRIVER_CLASS_NAME: "org.postgresql.Driver"
    # enables the "message peeking" feature
    PULSAR_PEEK_MESSAGE: "true"
  volumes:
    # use a persistent volume or emptyDir
    persistence: true
    data:
      name: data
      size: 128Mi
      local_storage: true
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      ## If you want to bind static persistent volumes via selectors, e.g.:
      # selector:
      #   matchLabels:
      #     app: pulsar-manager
      selector: {}
  ## Pulsar manager service
  ## templates/pulsar-manager-service.yaml
  ##
  service:
    type: ClusterIP
    port: 9527
    targetPort: 9527
    annotations: {}
    ## Set external traffic policy to "Local" to preserve source IP on providers supporting it.
    ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
    # externalTrafficPolicy: Local
    ## Restrict traffic through the load balancer to specified IPs on providers supporting it.
    # loadBalancerSourceRanges:
    #   - 10.0.0.0/8
    # Set a loadBalancerClass for the loadbalancer service. (example: loadBalancerClass is needed by metallb)
    # loadBalancerClass: ""
  adminService:
    type: ClusterIP
    port: 7750
    targetPort: 7750
    annotations: {}
  ## Pulsar manager ingress
  ## templates/pulsar-manager-ingress.yaml
  ##
  ingress:
    enabled: false
    annotations: {}
    ingressClassName: ""
    tls:
      enabled: false

      ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
      secretName: ""

    hostname: ""
    path: "/"
    pathType: ImplementationSpecific

  ## On first install, the helm chart tries to reuse an existing secret with a matching name by default.
  ## If this fails, it uses the given username and password to create a new secret.
  ## If either is missing, the default value of "pulsar" is used for the username, or a random password is generated.
  ## You can decode any key by using:
  ## kubectl get secret -l component=pulsar-manager -o=jsonpath="{.items[0].data.UI_PASSWORD}" | base64 --decode
  admin:
    ## Setting a value for existingSecret disables automatic creation of the secret for pulsar_manager admin credentials and instead uses the existing secret to initialize pulsar-manager
    ## The existing secret should have the following keys:
    ##   DB_PASSWORD: <database password>
    ##   DB_USERNAME: <database username>
    ##   UI_PASSWORD: <UI password>
    ##   UI_USERNAME: <UI username>
    existingSecret: ""
    ui_username: "pulsar"
    ui_password: "" # leave empty for random password
    db_username: "pulsar"
    db_password: "" # leave empty for random password

# These are the jobs where the job TTL configuration is used:
# pulsar-helm-chart/charts/pulsar/templates/pulsar-cluster-initialize.yaml
# pulsar-helm-chart/charts/pulsar/templates/bookkeeper-cluster-initialize.yaml
# pulsar-helm-chart/charts/pulsar/templates/pulsar-manager-cluster-initialize.yaml
job:
  ttl:
    enabled: false
    secondsAfterFinished: 3600

# This section is intended for cluster providers where all containers, including init containers,
# need to declare the resources they are going to use.
initContainer:
  resources:
    requests:
      memory: 256Mi
      cpu: 0.1