Compare commits

master..pulsar-3.9.0-candidate-1

No commits in common. "master" and "pulsar-3.9.0-candidate-1" have entirely different histories.

93 changed files with 1820 additions and 4722 deletions

File diff suppressed because it is too large.


@@ -1,73 +0,0 @@
{
  "clientId": $ARGS.named.CLIENT_ID,
  "enabled": true,
  "clientAuthenticatorType": "client-secret",
  "secret": $ARGS.named.CLIENT_SECRET,
  "standardFlowEnabled": false,
  "implicitFlowEnabled": false,
  "serviceAccountsEnabled": true,
  "protocol": "openid-connect",
  "attributes": {
    "realm_client": "false",
    "oidc.ciba.grant.enabled": "false",
    "client.secret.creation.time": "1735689600",
    "backchannel.logout.session.required": "true",
    "standard.token.exchange.enabled": "false",
    "frontchannel.logout.session.required": "true",
    "oauth2.device.authorization.grant.enabled": "false",
    "display.on.consent.screen": "false",
    "backchannel.logout.revoke.offline.tokens": "false"
  },
  "protocolMappers": [
    {
      "name": "sub",
      "protocol": "openid-connect",
      "protocolMapper": "oidc-hardcoded-claim-mapper",
      "consentRequired": false,
      "config": {
        "introspection.token.claim": "true",
        "claim.value": $ARGS.named.SUB_CLAIM_VALUE,
        "userinfo.token.claim": "true",
        "id.token.claim": "true",
        "lightweight.claim": "false",
        "access.token.claim": "true",
        "claim.name": "sub",
        "jsonType.label": "String",
        "access.tokenResponse.claim": "false"
      }
    },
    {
      "name": "nbf",
      "protocol": "openid-connect",
      "protocolMapper": "oidc-hardcoded-claim-mapper",
      "consentRequired": false,
      "config": {
        "introspection.token.claim": "true",
        "claim.value": "1735689600",
        "userinfo.token.claim": "true",
        "id.token.claim": "true",
        "lightweight.claim": "false",
        "access.token.claim": "true",
        "claim.name": "nbf",
        "jsonType.label": "long",
        "access.tokenResponse.claim": "false"
      }
    }
  ],
  "defaultClientScopes": [
    "web-origins",
    "service_account",
    "acr",
    "profile",
    "roles",
    "basic",
    "email"
  ],
  "optionalClientScopes": [
    "address",
    "phone",
    "organization",
    "offline_access",
    "microprofile-jwt"
  ]
}


@@ -1,26 +0,0 @@
# Keycloak

Keycloak is used to validate OIDC configuration.

To create the pulsar realm configuration, we use:
* `0-realm-pulsar-partial-export.json`: after creating the pulsar realm in the Keycloak UI, this file is the result of a partial export from the Keycloak UI without options.
* `1-client-template.json`: this is the template used to create pulsar clients.

To create the final `realm-pulsar.json`, merge the files with the `jq` command:
* create a client with `CLIENT_ID`, `CLIENT_SECRET` and `SUB_CLAIM_VALUE`:
```
CLIENT_ID=xx
CLIENT_SECRET=yy
SUB_CLAIM_VALUE=zz
jq -n --arg CLIENT_ID "$CLIENT_ID" --arg CLIENT_SECRET "$CLIENT_SECRET" --arg SUB_CLAIM_VALUE "$SUB_CLAIM_VALUE" -f 1-client-template.json > client.json
```
* then merge the realm and the client:
```
jq '.clients += [input]' 0-realm-pulsar-partial-export.json client.json > realm-pulsar.json
```
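A quick sanity check of the merged realm file is to query it with `jq` before using it — a minimal sketch, assuming the merge above was run in the current directory:

```bash
# confirm the realm name and how many clients were merged in
jq -r '.realm' realm-pulsar.json
jq '.clients | length' realm-pulsar.json

# list the client IDs that ended up in the realm
jq -r '.clients[].clientId' realm-pulsar.json
```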


@@ -1,34 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
tls:
  enabled: false
# This block sets up an example Pulsar Realm
# https://www.keycloak.org/server/importExport#_importing_a_realm_from_a_directory
extraEnvVars:
  - name: KEYCLOAK_EXTRA_ARGS
    value: "--import-realm"
extraVolumes:
  - name: realm-config
    secret:
      secretName: keycloak-ci-realm-config
extraVolumeMounts:
  - name: realm-config
    mountPath: "/opt/bitnami/keycloak/data/import"
    readOnly: true
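The `keycloak-ci-realm-config` secret referenced by `extraVolumes` above must exist before Keycloak starts; in CI it is created from the merged realm file (see `ci::create_openid_resources` further down). A minimal sketch, assuming `realm-pulsar.json` was produced by the `jq` merge and `$NAMESPACE` is the deployment namespace:

```bash
# create the secret that gets mounted at /opt/bitnami/keycloak/data/import
kubectl create secret generic keycloak-ci-realm-config \
  --from-file=realm-pulsar.json=realm-pulsar.json \
  -n "$NAMESPACE"
```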


@@ -1,5 +0,0 @@
{
  "type": "client_credentials",
  "client_id": $ARGS.named.CLIENT_ID,
  "client_secret": $ARGS.named.CLIENT_SECRET
}
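This template is rendered per component with `jq` and stored in a secret that the chart mounts for the OAuth2 client authentication plugin. A minimal sketch for the broker, assuming `$client_secret` holds the generated secret and `$NAMESPACE` the target namespace:

```bash
# render the credentials file for the broker (mirrors ci::create_openid_resources)
jq -n --arg CLIENT_ID "pulsar-broker" --arg CLIENT_SECRET "$client_secret" \
  -f .ci/auth/oauth2/credentials_file.json > /tmp/broker-credentials_file.json

# store it in the secret mounted at /pulsar/auth/broker
kubectl create secret generic pulsar-broker-credentials \
  --from-file=credentials_file.json=/tmp/broker-credentials_file.json -n "$NAMESPACE"
```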


@@ -28,7 +28,6 @@ TLS=${TLS:-"false"}
 SYMMETRIC=${SYMMETRIC:-"false"}
 FUNCTION=${FUNCTION:-"false"}
 MANAGER=${MANAGER:-"false"}
-ALLOW_LOADBALANCERS=${ALLOW_LOADBALANCERS:-"false"}

 source ${PULSAR_HOME}/.ci/helm.sh
@@ -57,28 +56,21 @@ fi
 install_type="install"
 test_action="produce-consume"
 if [[ "$UPGRADE_FROM_VERSION" != "" ]]; then
-  ALLOW_LOADBALANCERS="true"
   # install older version of pulsar chart
   PULSAR_CHART_VERSION="$UPGRADE_FROM_VERSION"
-  ci::install_pulsar_chart install ${PULSAR_HOME}/.ci/values-common.yaml ${PULSAR_HOME}/${VALUES_FILE} "${extra_opts[@]}"
+  # Install Prometheus Operator CRDs using the upgrade script since kube-prometheus-stack is now disabled before the upgrade
+  ${PULSAR_HOME}/scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh
+  ci::install_pulsar_chart install ${PULSAR_HOME}/.ci/values-common.yaml ${PULSAR_HOME}/${VALUES_FILE} --set kube-prometheus-stack.enabled=false "${extra_opts[@]}"
   install_type="upgrade"
   echo "Wait 10 seconds"
   sleep 10
-  # check pulsar environment
-  ci::check_pulsar_environment
   # test that we can access the admin api
   ci::test_pulsar_admin_api_access
   # produce messages with old version of pulsar and consume with new version
   ci::test_pulsar_producer_consumer "produce"
   test_action="consume"
-  if [[ "$(ci::helm_values_for_deployment | yq .victoria-metrics-k8s-stack.enabled)" == "true" ]]; then
-    echo "Upgrade Victoria Metrics Operator CRDs before upgrading the deployment"
-    ${PULSAR_HOME}/scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
+  if [[ "$(ci::helm_values_for_deployment | yq .kube-prometheus-stack.enabled)" == "true" ]]; then
+    echo "Upgrade Prometheus Operator CRDs before upgrading the deployment"
+    ${PULSAR_HOME}/scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh
   fi
 fi
@@ -89,11 +81,6 @@ ci::install_pulsar_chart ${install_type} ${PULSAR_HOME}/.ci/values-common.yaml $
 echo "Wait 10 seconds"
 sleep 10

-# check that there aren't any loadbalancers if ALLOW_LOADBALANCERS is false
-if [[ "${ALLOW_LOADBALANCERS}" == "false" ]]; then
-  ci::check_loadbalancers
-fi

 # check pulsar environment
 ci::check_pulsar_environment
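For reference, the test script above can be run locally against a test cluster; a sketch under the assumption that the entrypoint is the chart's CI test script (its filename is suppressed in this view — `.ci/chart_test.sh` here is hypothetical):

```bash
# hypothetical local invocation; the env vars match the script's defaults
UPGRADE_FROM_VERSION=3.2.0 \
TLS=false SYMMETRIC=false FUNCTION=false MANAGER=false \
VALUES_FILE=.ci/clusters/values-upgrade.yaml \
./.ci/chart_test.sh
```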


@@ -1,105 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# enable TLS with cacerts
tls:
  enabled: true
  proxy:
    enabled: true
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
  broker:
    enabled: true
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
  bookie:
    enabled: true
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
  zookeeper:
    enabled: true
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
  toolset:
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
  autorecovery:
    cacerts:
      enabled: true
      certs:
        - name: common-cacert
          existingSecret: "pulsar-ci-common-cacert"
          secretKeys:
            - ca.crt
# enable cert-manager
certs:
  internal_issuer:
    enabled: true
    type: selfsigning
# deploy cacerts
extraDeploy:
  - |
    apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
    kind: Certificate
    metadata:
      name: "{{ template "pulsar.fullname" . }}-common-cacert"
      namespace: {{ template "pulsar.namespace" . }}
      labels:
        {{- include "pulsar.standardLabels" . | nindent 4 }}
    spec:
      secretName: "{{ template "pulsar.fullname" . }}-common-cacert"
      commonName: "common-cacert"
      duration: "{{ .Values.certs.internal_issuer.duration }}"
      renewBefore: "{{ .Values.certs.internal_issuer.renewBefore }}"
      usages:
        - server auth
        - client auth
      isCA: true
      issuerRef:
        name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}"
        kind: Issuer
        group: cert-manager.io
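Once cert-manager has issued the CA certificate defined in `extraDeploy`, the resulting secret can be inspected to confirm the mounted `ca.crt` really exists and is valid — a minimal sketch, assuming the release yields the `pulsar-ci-common-cacert` secret referenced above:

```bash
# decode the CA certificate and print its subject, issuer, and validity window
kubectl get secret pulsar-ci-common-cacert -n "$NAMESPACE" \
  -o jsonpath='{.data.ca\.crt}' | base64 -d | openssl x509 -noout -subject -issuer -dates
```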


@@ -21,9 +21,9 @@
 auth:
   authentication:
     enabled: true
+    provider: "jwt"
     jwt:
       # Enable JWT authentication
-      enabled: true
       # If the token is generated by a secret key, set the usingSecretKey as true.
       # If the token is generated by a private key, set the usingSecretKey as false.
       usingSecretKey: false


@@ -21,9 +21,9 @@
 auth:
   authentication:
     enabled: true
+    provider: "jwt"
     jwt:
       # Enable JWT authentication
-      enabled: true
       # If the token is generated by a secret key, set the usingSecretKey as true.
       # If the token is generated by a private key, set the usingSecretKey as false.
       usingSecretKey: true
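For reference, the secret key or key pair that these two example files point at is typically generated with Pulsar's `tokens` CLI; a minimal sketch (output file names are illustrative, matching the `PRIVATEKEY`/`PUBLICKEY` entries in `.gitignore`):

```bash
# symmetric secret key (usingSecretKey: true)
bin/pulsar tokens create-secret-key --output SECRETKEY

# asymmetric key pair (usingSecretKey: false)
bin/pulsar tokens create-key-pair --output-private-key PRIVATEKEY --output-public-key PUBLICKEY

# issue a token for a role with the private key
bin/pulsar tokens create --private-key file:///pulsar/PRIVATEKEY --subject admin
```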


@@ -1,94 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Mount credentials to each component
proxy:
  configData:
    # Authentication settings of the broker itself. Used when the broker connects to other brokers, or when the proxy connects to brokers, either in same or other clusters
    brokerClientAuthenticationPlugin: "org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2"
    brokerClientAuthenticationParameters: '{"privateKey":"file:///pulsar/auth/proxy/credentials_file.json","audience":"account","issuerUrl":"http://keycloak-ci-headless:8080/realms/pulsar"}'
  extraVolumes:
    - name: pulsar-proxy-credentials
      secret:
        secretName: pulsar-proxy-credentials
  extraVolumeMounts:
    - name: pulsar-proxy-credentials
      mountPath: "/pulsar/auth/proxy"
      readOnly: true
broker:
  configData:
    # Authentication settings of the broker itself. Used when the broker connects to other brokers, or when the proxy connects to brokers, either in same or other clusters
    brokerClientAuthenticationPlugin: "org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2"
    brokerClientAuthenticationParameters: '{"privateKey":"file:///pulsar/auth/broker/credentials_file.json","audience":"account","issuerUrl":"http://keycloak-ci-headless:8080/realms/pulsar"}'
  extraVolumes:
    - name: pulsar-broker-credentials
      secret:
        secretName: pulsar-broker-credentials
  extraVolumeMounts:
    - name: pulsar-broker-credentials
      mountPath: "/pulsar/auth/broker"
      readOnly: true
toolset:
  configData:
    authPlugin: "org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2"
    authParams: '{"privateKey":"file:///pulsar/auth/admin/credentials_file.json","audience":"account","issuerUrl":"http://keycloak-ci-headless:8080/realms/pulsar"}'
  extraVolumes:
    - name: pulsar-admin-credentials
      secret:
        secretName: pulsar-admin-credentials
  extraVolumeMounts:
    - name: pulsar-admin-credentials
      mountPath: "/pulsar/auth/admin"
      readOnly: true
auth:
  authentication:
    enabled: true
    openid:
      # Enable openid authentication
      enabled: true
      # https://pulsar.apache.org/docs/next/security-openid-connect/#enable-openid-connect-authentication-in-the-broker-and-proxy
      openIDAllowedTokenIssuers:
        - http://keycloak-ci-headless:8080/realms/pulsar
      openIDAllowedAudiences:
        - account
      #openIDTokenIssuerTrustCertsFilePath:
      openIDRoleClaim: "sub"
      openIDAcceptedTimeLeewaySeconds: "0"
      openIDCacheSize: "5"
      openIDCacheRefreshAfterWriteSeconds: "64800"
      openIDCacheExpirationSeconds: "86400"
      openIDHttpConnectionTimeoutMillis: "10000"
      openIDHttpReadTimeoutMillis: "10000"
      openIDKeyIdCacheMissRefreshSeconds: "300"
      openIDRequireIssuersUseHttps: "false"
      openIDFallbackDiscoveryMode: "DISABLED"
  authorization:
    enabled: true
  superUsers:
    # broker to broker communication
    broker: "broker-admin"
    # proxy to broker communication
    proxy: "proxy-admin"
    # pulsar-admin client to broker/proxy communication
    client: "admin"
    # pulsar manager to broker
    manager: "manager-admin"
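To check that the issuer configured above is reachable and that a client can actually obtain a token, the standard Keycloak OIDC token endpoint can be queried directly; a sketch assuming the CI client `pulsar-admin` and its generated secret:

```bash
# run inside the cluster; the issuer host is a cluster-internal service
curl -s http://keycloak-ci-headless:8080/realms/pulsar/protocol/openid-connect/token \
  -d grant_type=client_credentials \
  -d client_id=pulsar-admin \
  -d client_secret="$CLIENT_SECRET" | jq -r .access_token
```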


@@ -17,16 +17,14 @@
 # under the License.
 #
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-helmGlobals:
-  chartHome: ../charts
-helmCharts:
-- name: pulsar
-  releaseName: pulsar
-  valuesInline:
-    victoria-metrics-k8s-stack:
-      enabled: false
-    components:
-      pulsar_manager: true
-      zookeeper: true
+kube-prometheus-stack:
+  enabled: true
+  prometheus:
+    enabled: true
+  grafana:
+    enabled: true
+    adminPassword: pulsar-ci-admin
+  alertmanager:
+    enabled: false
+  prometheus-node-exporter:
+    enabled: true


@@ -17,7 +17,6 @@
 # under the License.
 #
-{{- range .Values.extraDeploy }}
----
-{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
-{{- end }}
+rbac:
+  enabled: true
+  psp: true


@@ -17,4 +17,4 @@
 # under the License.
 #
-defaultPulsarImageTag: 3.0.12
+defaultPulsarImageTag: 3.0.9


@@ -1,60 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
victoria-metrics-k8s-stack:
  enabled: true
  victoria-metrics-operator:
    enabled: true
  vmsingle:
    enabled: true
  vmagent:
    enabled: true
  grafana:
    enabled: true
    adminPassword: pulsar-ci-admin
  prometheus-node-exporter:
    enabled: true
zookeeper:
  podMonitor:
    enabled: true
bookkeeper:
  podMonitor:
    enabled: true
broker:
  podMonitor:
    enabled: true
autorecovery:
  podMonitor:
    enabled: true
proxy:
  podMonitor:
    enabled: true
oxia:
  coordinator:
    podMonitor:
      enabled: true
  server:
    podMonitor:
      enabled: true


@@ -27,7 +27,7 @@ function k9s() {
   # install k9s on the fly
   if [ ! -x /usr/local/bin/k9s ]; then
     echo "Installing k9s..."
-    curl -L -s https://github.com/derailed/k9s/releases/download/v0.40.5/k9s_Linux_amd64.tar.gz | sudo tar xz -C /usr/local/bin k9s
+    curl -L -s https://github.com/derailed/k9s/releases/download/v0.32.5/k9s_Linux_amd64.tar.gz | sudo tar xz -C /usr/local/bin k9s
   fi
   command k9s "$@"
 }
} }

.ci/helm.sh Executable file → Normal file

@@ -84,14 +84,13 @@ function ci::install_cert_manager() {
 function ci::helm_repo_add() {
   echo "Adding the helm repo ..."
   ${HELM} repo add prometheus-community https://prometheus-community.github.io/helm-charts
-  ${HELM} repo add vm https://victoriametrics.github.io/helm-charts/
   ${HELM} repo update
   echo "Successfully added the helm repo."
 }

 function ci::print_pod_logs() {
-  echo "Logs for all containers:"
-  for k8sobject in $(${KUBECTL} get pods,jobs -n ${NAMESPACE} -o=name); do
+  echo "Logs for all pulsar containers:"
+  for k8sobject in $(${KUBECTL} get pods,jobs -n ${NAMESPACE} -l app=pulsar -o=name); do
     ${KUBECTL} logs -n ${NAMESPACE} "$k8sobject" --all-containers=true --ignore-errors=true --prefix=true --tail=100 || true
   done;
 }
@@ -99,7 +98,7 @@ function ci::print_pod_logs() {
 function ci::collect_k8s_logs() {
   mkdir -p "${K8S_LOGS_DIR}" && cd "${K8S_LOGS_DIR}"
   echo "Collecting k8s logs to ${K8S_LOGS_DIR}"
-  for k8sobject in $(${KUBECTL} get pods,jobs -n ${NAMESPACE} -o=name); do
+  for k8sobject in $(${KUBECTL} get pods,jobs -n ${NAMESPACE} -l app=pulsar -o=name); do
     filebase="${k8sobject//\//_}"
     ${KUBECTL} logs -n ${NAMESPACE} "$k8sobject" --all-containers=true --ignore-errors=true --prefix=true > "${filebase}.$$.log.txt" || true
     ${KUBECTL} logs -n ${NAMESPACE} "$k8sobject" --all-containers=true --ignore-errors=true --prefix=true --previous=true > "${filebase}.previous.$$.log.txt" || true
@@ -118,7 +117,7 @@ function ci::install_pulsar_chart() {
   local extra_opts=()
   local values_next=false
   for arg in "$@"; do
-    if [[ "$arg" == "--values" || "$arg" == "--set" ]]; then
+    if [[ "$arg" == "--values" ]]; then
       extra_values+=("$arg")
       values_next=true
     elif [[ "$values_next" == true ]]; then
@@ -149,11 +148,6 @@
     # configure metallb
     ${KUBECTL} apply -f ${BINDIR}/metallb/metallb-config.yaml
     install_args=""
-    # create auth resources
-    if [[ "x${AUTHENTICATION_PROVIDER}" == "xopenid" ]]; then
-      ci::create_openid_resources
-    fi
   else
     install_args="--wait --wait-for-jobs --timeout 360s --debug"
   fi
@@ -277,7 +271,6 @@ function ci::retry() {
 }

 function ci::test_pulsar_admin_api_access() {
-  echo "Test pulsar admin api access"
   ci::retry ${KUBECTL} exec -n ${NAMESPACE} ${CLUSTER}-toolset-0 -- bin/pulsar-admin tenants list
 }
@@ -430,158 +423,3 @@ function ci::test_pulsar_manager() {
     exit 1
   fi
 }
function ci::check_loadbalancers() {
  (
    set +e
    ${KUBECTL} get services -n ${NAMESPACE} | grep LoadBalancer
    if [ $? -eq 0 ]; then
      echo "Error: Found service with type LoadBalancer. This is not allowed because of security reasons."
      exit 1
    fi
    exit 0
  )
}

function ci::validate_kustomize_yaml() {
  # if kustomize is not installed, install kustomize to a temp directory
  if ! command -v kustomize &> /dev/null; then
    KUSTOMIZE_VERSION=5.6.0
    KUSTOMIZE_DIR=$(mktemp -d)
    echo "Installing kustomize ${KUSTOMIZE_VERSION} to ${KUSTOMIZE_DIR}"
    curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash -s ${KUSTOMIZE_VERSION} ${KUSTOMIZE_DIR}
    export PATH=${KUSTOMIZE_DIR}:$PATH
  fi
  # prevent regression of https://github.com/apache/pulsar-helm-chart/issues/569
  local kustomize_yaml_dir=$(mktemp -d)
  cp ${PULSAR_HOME}/.ci/kustomization.yaml ${kustomize_yaml_dir}
  PULSAR_HOME=${PULSAR_HOME} yq -i '.helmGlobals.chartHome = env(PULSAR_HOME) + "/charts"' ${kustomize_yaml_dir}/kustomization.yaml
  failures=0
  # validate zookeeper init
  echo "Validating kustomize yaml output with zookeeper init"
  _ci::validate_kustomize_yaml ${kustomize_yaml_dir} || ((failures++))
  # validate oxia init
  yq -i '.helmCharts[0].valuesInline.components += {"zookeeper": false, "oxia": true}' ${kustomize_yaml_dir}/kustomization.yaml
  echo "Validating kustomize yaml output with oxia init"
  _ci::validate_kustomize_yaml ${kustomize_yaml_dir} || ((failures++))
  if [ $failures -gt 0 ]; then
    exit 1
  fi
}

function _ci::validate_kustomize_yaml() {
  local kustomize_yaml_dir=$1
  kustomize build --enable-helm --helm-kube-version 1.23.0 --load-restrictor=LoadRestrictionsNone ${kustomize_yaml_dir} | yq 'select(.spec.template.spec.containers[0].args != null) | .spec.template.spec.containers[0].args' | \
    awk '{
      if (prev_line ~ /\\$/ && $0 ~ /^$/) {
        print "Found issue: backslash at end of line followed by empty line. Must use pipe character for multiline strings to support kustomize due to kubernetes-sigs/kustomize#4201.";
        print "Line: " prev_line;
        has_issue = 1;
      }
      prev_line = $0;
    }
    END {
      if (!has_issue) {
        print "No issues found: no backslash followed by empty line";
        exit 0;
      }
      exit 1;
    }'
}

# Create all resources needed for openid authentication
function ci::create_openid_resources() {
  echo "Creating openid resources"
  cp ${PULSAR_HOME}/.ci/auth/keycloak/0-realm-pulsar-partial-export.json /tmp/realm-pulsar.json
  for component in broker proxy admin manager; do
    echo "Creating openid resources for ${component}"
    local client_id=pulsar-${component}
    # GitHub Actions hangs when reading a string from /dev/urandom, so use python to generate a random string
    local client_secret=$(python -c "import secrets; import string; length = 32; random_string = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(length)); print(random_string);")
    if [[ "${component}" == "admin" ]]; then
      local sub_claim_value="admin"
    else
      local sub_claim_value="${component}-admin"
    fi
    # Create the client credentials file
    jq -n --arg CLIENT_ID $client_id --arg CLIENT_SECRET "$client_secret" -f ${PULSAR_HOME}/.ci/auth/oauth2/credentials_file.json > /tmp/${component}-credentials_file.json
    # Create the secret for the client credentials
    local secret_name="pulsar-${component}-credentials"
    ${KUBECTL} create secret generic ${secret_name} --from-file=credentials_file.json=/tmp/${component}-credentials_file.json -n ${NAMESPACE}
    # Create the keycloak client file
    jq -n --arg CLIENT_ID $client_id --arg CLIENT_SECRET "$client_secret" --arg SUB_CLAIM_VALUE "$sub_claim_value" -f ${PULSAR_HOME}/.ci/auth/keycloak/1-client-template.json > /tmp/${component}-keycloak-client.json
    # Merge the keycloak client file with the realm
    jq '.clients += [input]' /tmp/realm-pulsar.json /tmp/${component}-keycloak-client.json > /tmp/realm-pulsar.json.tmp
    mv /tmp/realm-pulsar.json.tmp /tmp/realm-pulsar.json
  done
  echo "Create keycloak realm configuration"
  ${KUBECTL} create secret generic keycloak-ci-realm-config --from-file=realm-pulsar.json=/tmp/realm-pulsar.json -n ${NAMESPACE}
  echo "Installing keycloak helm chart"
  ${HELM} install keycloak-ci oci://registry-1.docker.io/bitnamicharts/keycloak --version 24.6.4 --values ${PULSAR_HOME}/.ci/auth/keycloak/values.yaml -n ${NAMESPACE}
  echo "Wait until keycloak is running"
  WC=$(${KUBECTL} get pods -n ${NAMESPACE} --field-selector=status.phase=Running | grep keycloak-ci-0 | wc -l)
  counter=1
  while [[ ${WC} -lt 1 ]]; do
    ((counter++))
    echo ${WC};
    sleep 15
    ${KUBECTL} get pods,jobs -n ${NAMESPACE}
    ${KUBECTL} get events --sort-by=.lastTimestamp -A | tail -n 30 || true
    if [[ $((counter % 20)) -eq 0 ]]; then
      ci::print_pod_logs
      if [[ $counter -gt 100 ]]; then
        echo >&2 "Timeout waiting..."
        exit 1
      fi
    fi
    WC=$(${KUBECTL} get pods -n ${NAMESPACE} --field-selector=status.phase=Running | grep keycloak-ci-0 | wc -l)
  done
  echo "Wait until keycloak is ready"
  ${KUBECTL} wait --for=condition=Ready pod/keycloak-ci-0 -n ${NAMESPACE} --timeout 180s
  echo "Check keycloak realm pulsar issuer url"
  ${KUBECTL} exec -n ${NAMESPACE} keycloak-ci-0 -c keycloak -- bash -c 'curl -sSL http://keycloak-ci-headless:8080/realms/pulsar'
}

# lists all available functions in this tool
function ci::list_functions() {
  declare -F | awk '{print $NF}' | sort | grep -E '^ci::' | sed 's/^ci:://'
}

# Only run this section if the script is being executed directly (not sourced)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
  if [ -z "$1" ]; then
    echo "usage: $0 [function_name]"
    echo "Available functions:"
    ci::list_functions
    exit 1
  fi
  ci_function_name="ci::$1"
  shift
  if [[ "$(LC_ALL=C type -t "${ci_function_name}")" == "function" ]]; then
    eval "$ci_function_name" "$@"
    exit $?
  else
    echo "Invalid ci function"
    echo "Available functions:"
    ci::list_functions
    exit 1
  fi
fi
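Because of the dispatcher at the end of the script, individual `ci::` helpers can be invoked directly from a checkout:

```bash
# list all available ci:: functions
./.ci/helm.sh list_functions

# run a single helper by name
./.ci/helm.sh check_loadbalancers
```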


@@ -17,35 +17,15 @@
 # under the License.
 #
-victoria-metrics-k8s-stack:
+kube-prometheus-stack:
   enabled: false
-  victoria-metrics-operator:
-    enabled: false
-  vmsingle:
-    enabled: false
-  vmagent:
-    enabled: false
-  vmalert:
-    enabled: false
-  alertmanager:
+  prometheusOperator:
     enabled: false
   grafana:
     enabled: false
-  prometheus-node-exporter:
+  alertmanager:
     enabled: false
-  kube-state-metrics:
-    enabled: false
-  kubelet:
-    enabled: false
-  kubeApiServer:
-    enabled: false
-  kubeControllerManager:
-    enabled: false
-  coreDns:
-    enabled: false
-  kubeEtcd:
-    enabled: false
-  kubeScheduler:
+  prometheus:
     enabled: false

 # disabled AntiAffinity
@@ -75,12 +55,6 @@ bookkeeper:
     diskUsageWarnThreshold: "0.999"
     PULSAR_PREFIX_diskUsageThreshold: "0.999"
     PULSAR_PREFIX_diskUsageWarnThreshold: "0.999"
-    # minimal memory use for bookkeeper
-    # https://bookkeeper.apache.org/docs/reference/config#db-ledger-storage-settings
-    dbStorage_writeCacheMaxSizeMb: "32"
-    dbStorage_readAheadCacheMaxSizeMb: "32"
-    dbStorage_rocksDB_writeBufferSizeMB: "8"
-    dbStorage_rocksDB_blockCacheSize: "8388608"

 broker:
   replicaCount: 1


@@ -39,15 +39,15 @@ inputs:
   version:
     description: "The chart-testing version to install"
     required: false
-    default: v3.12.0
+    default: v3.11.0
   yamllint_version:
     description: "The yamllint version to install"
     required: false
-    default: '1.35.1'
+    default: '1.33.0'
   yamale_version:
     description: "The yamale version to install"
     required: false
-    default: '6.0.0'
+    default: '4.0.4'
 runs:
   using: composite
   steps:


@@ -35,9 +35,9 @@ set -o errexit
 set -o nounset
 set -o pipefail

-DEFAULT_CHART_TESTING_VERSION=v3.12.0
-DEFAULT_YAMLLINT_VERSION=1.35.1
-DEFAULT_YAMALE_VERSION=6.0.0
+DEFAULT_CHART_TESTING_VERSION=v3.11.0
+DEFAULT_YAMLLINT_VERSION=1.33.0
+DEFAULT_YAMALE_VERSION=4.0.4

 ARCH=$(uname -m)
 case $ARCH in
@@ -131,24 +131,18 @@ install_chart_testing() {
         tar -xzf ct.tar.gz -C "$cache_dir"
         rm -f ct.tar.gz

-        # if uv (https://docs.astral.sh/uv/) is not installed, install it
-        if ! command -v uv &> /dev/null; then
-            echo 'Installing uv...'
-            curl -LsSf https://astral.sh/uv/install.sh | sh
-        fi
         echo 'Creating virtual Python environment...'
-        uv venv "$venv_dir"
+        python3 -m venv "$venv_dir"
         echo 'Activating virtual environment...'
         # shellcheck disable=SC1090
         source "$venv_dir/bin/activate"

         echo 'Installing yamllint...'
-        uv pip install "yamllint==${yamllint_version}"
+        pip3 install "yamllint==${yamllint_version}"

         echo 'Installing Yamale...'
-        uv pip install "yamale==${yamale_version}"
+        pip3 install "yamale==${yamale_version}"
     fi

     # https://github.com/helm/chart-testing-action/issues/62


@@ -53,8 +53,8 @@ runs:
         # tune filesystem mount options, https://www.kernel.org/doc/Documentation/filesystems/ext4.txt
         # commit=999999, effectively disables automatic syncing to disk (default is every 5 seconds)
         # nobarrier/barrier=0, loosen data consistency on system crash (no negative impact to ephemeral CI nodes)
-        sudo mount -o remount,nodiscard,commit=999999,barrier=0 / || true
-        sudo mount -o remount,nodiscard,commit=999999,barrier=0 /mnt || true
+        sudo mount -o remount,nodiscard,commit=999999,barrier=0 /
+        sudo mount -o remount,nodiscard,commit=999999,barrier=0 /mnt
         # disable discard/trim at device level since remount with nodiscard doesn't seem to be effective
         # https://www.spinics.net/lists/linux-ide/msg52562.html
         for i in /sys/block/sd*/queue/discard_max_bytes; do
@@ -77,6 +77,12 @@
         # stop Azure Linux agent to save RAM
         sudo systemctl stop walinuxagent.service || true
+        # enable docker experimental mode which is
+        # required for using "docker build --squash" / "-Ddocker.squash=true"
+        daemon_json="$(sudo cat /etc/docker/daemon.json | jq '.experimental = true')"
+        echo "$daemon_json" | sudo tee /etc/docker/daemon.json
+        # restart docker daemon
+        sudo systemctl restart docker
         echo '::endgroup::'

         # show memory


@@ -32,10 +32,9 @@ concurrency:
   cancel-in-progress: true

 jobs:
   preconditions:
     name: Preconditions
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-22.04
     if: (github.event_name != 'schedule') || (github.repository == 'apache/pulsar-helm-chart')
     outputs:
       docs_only: ${{ steps.check_changes.outputs.docs_only }}
@@ -63,7 +62,7 @@
   license-check:
     needs: preconditions
     name: License Check
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 10
     if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
     steps:
@@ -84,7 +83,7 @@
   ct-lint:
     needs: ['preconditions', 'license-check']
     name: chart-testing lint
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 45
     if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
     steps:
@@ -108,17 +107,13 @@
         if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
         uses: azure/setup-helm@v4
         with:
-          version: v3.16.4
+          version: v3.14.4

       - name: Set up Python
         if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
         uses: actions/setup-python@v5
         with:
-          python-version: '3.12'
+          python-version: '3.9'

-      - name: Install uv, a fast modern package manager for Python
-        if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
-        run: curl -LsSf https://astral.sh/uv/install.sh | sh

       - name: Set up chart-testing
         if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
@@ -132,7 +127,7 @@
             --validate-maintainers=false \
             --target-branch ${{ github.event.repository.default_branch }}

-      - name: Run kubeconform check for helm template with every major k8s version 1.25.0-1.32.0
+      - name: Run kubeconform check for helm template with every major k8s version 1.23.0-1.30.0
         if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
         run: |
           PULSAR_CHART_HOME=$(pwd)
@@ -152,25 +147,16 @@
            else
              echo ""
            fi
-            helm template charts/pulsar --set victoria-metrics-k8s-stack.enabled=false --set components.pulsar_manager=true --kube-version $kube_version "$@" | \
+            helm template charts/pulsar --set kube-prometheus-stack.enabled=false --set components.pulsar_manager=true --kube-version $kube_version "$@" | \
              kubeconform -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' -strict -kubernetes-version $kube_version -summary
           }
           set -o pipefail
-          for k8s_version_part in {25..32}; do
+          for k8s_version_part in {23..30}; do
             k8s_version="1.${k8s_version_part}.0"
-            echo "Validating default values with k8s version $k8s_version"
             validate_helm_template_with_k8s_version $k8s_version
-            for config in .ci/clusters/*.yaml; do
-              echo "Validating $config with k8s version $k8s_version"
-              validate_helm_template_with_k8s_version $k8s_version --values .ci/values-common.yaml --values $config
-            done
+            echo "Validating with Oxia enabled"
+            validate_helm_template_with_k8s_version $k8s_version --set components.zookeeper=false --set components.oxia=true
           done

-      - name: Validate kustomize yaml for extra new lines in pulsar-init commands
-        if: ${{ steps.check_changes.outputs.docs_only != 'true' }}
-        run: |
-          ./.ci/helm.sh validate_kustomize_yaml

       - name: Wait for ssh connection when build fails
         # ssh access is enabled for builds in own forks
         uses: ./.github/actions/ssh-access
@@ -181,20 +167,19 @@
   install-chart-tests:
     name: ${{ matrix.testScenario.name }} - k8s ${{ matrix.k8sVersion.version }} - ${{ matrix.testScenario.type || 'install' }}
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-22.04
     timeout-minutes: ${{ matrix.testScenario.timeout || 45 }}
     needs: ['preconditions', 'ct-lint']
     if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
     strategy:
       fail-fast: false
       matrix:
-        # see https://github.com/kubernetes-sigs/kind/releases/tag/v0.27.0 for the list of supported k8s versions for kind 0.27.0
-        # docker images are available at https://hub.docker.com/r/kindest/node/tags
+        # see https://github.com/kubernetes-sigs/kind/releases/tag/v0.22.0 for the list of supported k8s versions for kind 0.22.0
         k8sVersion:
-          - version: "1.25.16"
-            kind_image_tag: v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025
-          - version: "1.32.2"
-            kind_image_tag: v1.32.2@sha256:f226345927d7e348497136874b6d207e0b32cc52154ad8323129352923a3142f
+          - version: "1.23.17"
+            kind_image_tag: v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3
+          - version: "1.29.2"
+            kind_image_tag: v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245
         testScenario:
           - name: Upgrade latest released version
             values_file: .ci/clusters/values-upgrade.yaml
@@ -224,39 +209,44 @@
           - name: ZK & BK TLS Only
             values_file: .ci/clusters/values-zkbk-tls.yaml
             shortname: zkbk-tls
+          - name: PSP
+            values_file: .ci/clusters/values-psp.yaml
+            shortname: psp
           - name: Pulsar Manager
             values_file: .ci/clusters/values-pulsar-manager.yaml
             shortname: pulsar-manager
           - name: Oxia
             values_file: .ci/clusters/values-oxia.yaml
             shortname: oxia
-          - name: OpenID
-            values_file: .ci/clusters/values-openid.yaml
-            shortname: openid
-          - name: CA certificates
-            values_file: .ci/clusters/values-cacerts.yaml
-            shortname: cacerts
         include:
           - k8sVersion:
-              version: "1.25.16"
-              kind_image_tag: v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025
+              version: "1.23.17"
+              kind_image_tag: v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3
             testScenario:
               name: "Upgrade TLS"
               values_file: .ci/clusters/values-tls.yaml
               shortname: tls
               type: upgrade
           - k8sVersion:
-              version: "1.25.16"
-              kind_image_tag: v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025
+              version: "1.23.17"
+              kind_image_tag: v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3
             testScenario:
-              name: "Upgrade victoria-metrics-k8s-stack for previous LTS"
-              values_file: .ci/clusters/values-victoria-metrics-grafana.yaml --values .ci/clusters/values-pulsar-previous-lts.yaml
-              shortname: victoria-metrics-grafana
+              name: "Upgrade PSP"
+              values_file: .ci/clusters/values-psp.yaml
+              shortname: psp
+              type: upgrade
+          - k8sVersion:
+              version: "1.23.17"
+              kind_image_tag: v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3
+            testScenario:
+              name: "Upgrade kube-prometheus-stack for previous LTS"
+              values_file: .ci/clusters/values-prometheus-grafana.yaml --values .ci/clusters/values-pulsar-previous-lts.yaml
+              shortname: prometheus-grafana
               type: upgrade
               upgradeFromVersion: 3.2.0
           - k8sVersion:
-              version: "1.25.16"
-              kind_image_tag: v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025
+              version: "1.23.17"
+              kind_image_tag: v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3
             testScenario:
               name: "TLS with helm 3.12.0"
               values_file: .ci/clusters/values-tls.yaml
@@ -296,9 +286,6 @@
             "jwt-asymmetric")
               export EXTRA_SUPERUSERS=manager-admin
               ;;
-            "openid")
-              export AUTHENTICATION_PROVIDER=openid
-              ;;
           esac
           if [[ "${{ matrix.testScenario.type || 'install' }}" == "upgrade" ]]; then
             export UPGRADE_FROM_VERSION="${{ matrix.testScenario.upgradeFromVersion || 'latest' }}"
@@ -337,7 +324,7 @@
   pulsar-helm-chart-ci-checks-completed:
     name: "CI checks completed"
     if: ${{ always() && ((github.event_name != 'schedule') || (github.repository == 'apache/pulsar-helm-chart')) }}
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-22.04
     timeout-minutes: 10
     needs: [
       'preconditions',
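The kubeconform step above can be reproduced locally; this sketch uses the master-side command for a single Kubernetes version:

```bash
# render the chart and validate the manifests against the 1.25.0 schemas
helm template charts/pulsar \
  --set victoria-metrics-k8s-stack.enabled=false \
  --set components.pulsar_manager=true \
  --kube-version 1.25.0 | \
  kubeconform -schema-location default \
    -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' \
    -strict -kubernetes-version 1.25.0 -summary
```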

.gitignore

@@ -17,3 +17,5 @@ charts/**/*.lock
 PRIVATEKEY
 PUBLICKEY
 .vagrant/
+pulsarctl-*-*.tar.gz
+pulsarctl-*-*/

README.md

@@ -27,113 +27,27 @@ Read [Deploying Pulsar on Kubernetes](http://pulsar.apache.org/docs/deploy-kuber
 > :warning: This helm chart is updated outside of the regular Pulsar release cycle and might lag behind a bit. It only supports basic Kubernetes features now. Currently, it can be used as no more than a template and starting point for a Kubernetes deployment. In many cases, it would require some customizations.

-## Important Security Advisory for Helm Chart Usage
+## Important Security Disclaimer for Helm Chart Usage

 ### Notice of Default Configuration

-This Helm chart's default configuration DOES NOT meet production security requirements.
-Users MUST review and customize security settings for their specific environment.
-
-IMPORTANT: This Helm chart provides a starting point for Pulsar deployments but requires
-significant security customization before use in production environments. We strongly
-recommend implementing:
-
-1. Authentication and authorization for all components
-2. TLS encryption for all communication channels
-3. Proper network isolation and access controls
-4. Regular security updates and vulnerability assessments
-
-As an open source project, we welcome contributions to improve security features.
-Please consider submitting pull requests to address security gaps or enhance
-existing security implementations.
+This Helm chart is provided with a default configuration that does not meet the security requirements for production environments or sensitive data handling. Users are strongly advised to thoroughly review and customize the security settings to ensure a secure deployment that aligns with their specific operational and security policies.

 ### Pulsar Proxy Security Considerations

 As per the [Pulsar Proxy documentation](https://pulsar.apache.org/docs/3.1.x/administration-proxy/), it is explicitly stated that the Pulsar proxy is not designed for exposure to the public internet. The design assumes that deployments will be protected by network perimeter security measures. It is crucial to understand that relying solely on the default configuration can expose your deployment to significant security vulnerabilities.

-### Upgrading
-
-#### To 4.1.0
-
-This version introduces `OpenID` authentication. Setting `auth.authentication.provider` is no longer supported, you need to enable the provider with `auth.authentication.<provider>.enabled`.
-
-#### To 4.0.0
-
-The default service type for the Pulsar proxy has changed from `LoadBalancer` to `ClusterIP` for security reasons. This limits access to within the Kubernetes environment by default.
-
-### External Access Recommendations
-
-If you need to expose the Pulsar Proxy outside the cluster:
-
-1. **USE INTERNAL LOAD BALANCERS ONLY**
-   - Set type to LoadBalancer only in secured environments with proper network controls
-   - Add cloud provider-specific annotations for internal load balancers:
-     - Kubernetes documentation about internal load balancers:
-       - [Internal load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer)
-     - See cloud provider documentation:
-       - AWS / EKS: [AWS Load Balancer Controller / Service Annotations](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/)
-       - Azure / AKS: [Use an internal load balancer with Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/internal-lb)
-       - GCP / GKE: [LoadBalancer service parameters](https://cloud.google.com/kubernetes-engine/docs/concepts/service-load-balancer-parameters)
-     - Examples (verify correctness for your environment):
-       - AWS / EKS: `service.beta.kubernetes.io/aws-load-balancer-internal: "true"`
-       - Azure / AKS: `service.beta.kubernetes.io/azure-load-balancer-internal: "true"`
-       - GCP / GKE: `networking.gke.io/load-balancer-type: "Internal"`
-2. **IMPLEMENT AUTHENTICATION AND AUTHORIZATION**
-   - Configure all clients to authenticate properly
-   - Set up appropriate authorization policies
-3. **USE TLS FOR ALL CONNECTIONS**
-   - Enable TLS for client-to-proxy connections
-   - Enable TLS for proxy-to-broker connections
-   - Enable TLS for all internal cluster communications
-   - Note: TLS alone is NOT sufficient as a security solution. Even with TLS enabled, clusters exposed to untrusted networks remain vulnerable to denial-of-service attacks, authentication bypass attempts, and protocol-level exploits.
-4. **NETWORK SECURITY**
-   - Use private networks (VPCs)
-   - Configure firewalls, security groups, and IP restrictions
-5. **CLIENT IP ADDRESS BASED ACCESS RESTRICTIONS**
-   - When using a LoadBalancer service type, restrict access to specific IP ranges by configuring `proxy.service.loadBalancerSourceRanges` in your values.yaml:
-     ```yaml
-     proxy:
-       service:
-         loadBalancerSourceRanges:
-           - 10.0.0.0/8     # Private network range
-           - 172.16.0.0/12  # Private network range
-           - 192.168.0.0/16 # Private network range
-     ```
-   - This feature:
-     - Provides an additional defense layer by filtering traffic at the load balancer level
-     - Only allows connections from specified CIDR blocks
-     - Works only with LoadBalancer service type and when your cloud provider supports the `loadBalancerSourceRanges` parameter
-   - Important: This should be implemented alongside other security measures (internal load balancer, authentication, TLS, network policies) as part of a defense-in-depth strategy,
-     not as a standalone security solution
-
-### Alternative for External Access
-
-As an alternative method for external access, Pulsar has support for [SNI proxy routing](https://pulsar.apache.org/docs/next/concepts-proxy-sni-routing/). SNI Proxy routing is supported with proxy servers such as Apache Traffic Server, HAProxy and Nginx.
-
-Note: This option isn't currently implemented in the Apache Pulsar Helm chart.
-
-**IMPORTANT**: Pulsar binary protocol cannot be exposed outside of the Kubernetes cluster using Kubernetes Ingress. Kubernetes Ingress works for the Admin REST API and topic lookups, but clients would be connecting to the advertised listener addresses returned by the brokers and it would only work when clients can connect directly to brokers. This is not a supported secure option for exposing Pulsar to untrusted networks.
-
-### General Recommendations
+#### Recommendations:

 - **Network Perimeter Security:** It is imperative to implement robust network perimeter security to safeguard your deployment. The absence of such security measures can lead to unauthorized access and potential data breaches.
 - **Restricted Access:** For environments where security is less critical, such as certain development or testing scenarios, the use of `loadBalancerSourceRanges` may be employed to restrict access to specified IP addresses or ranges. This, however, should not be considered a substitute for comprehensive security measures in production environments.

 ### User Responsibility

 The user assumes full responsibility for the security and integrity of their deployment. This includes, but is not limited to, the proper configuration of security features and adherence to best practices for securing network access. The providers of this Helm chart disclaim all warranties, whether express or implied, including any warranties of merchantability, fitness for a particular purpose, and non-infringement of third-party rights.

 ### No Security Guarantees

 The providers of this Helm chart make no guarantees regarding the security of the chart under any circumstances. It is the user's responsibility to ensure that their deployment is secure and complies with all relevant security standards and regulations.

 By using this Helm chart, the user acknowledges the risks associated with its default configuration and the necessity for proper security customization. The user further agrees that the providers of the Helm chart shall not be liable for any security breaches or incidents resulting from the use of the chart.

 ## Features

 This Helm Chart includes all the components of Apache Pulsar for a complete experience.
@@ -147,7 +61,7 @@ This Helm Chart includes all the components of Apache Pulsar for a complete expe
 - [x] Management & monitoring components:
   - [x] Pulsar Manager
   - [x] Optional PodMonitors for each component (enabled by default)
-  - [x] [victoria-metrics-k8s-stack](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack) (as of 4.0.0)
+  - [x] [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) (as of 3.0.0)

 It includes support for:

@@ -160,10 +74,9 @@ It includes support for:
 - [x] Broker
 - [x] Toolset
 - [x] Bookie
-- [x] ZooKeeper (requires the `AdditionalCertificateOutputFormats=true` feature gate to be enabled in the cert-manager deployment when using cert-manager versions below 1.15.0)
+- [x] ZooKeeper
 - [x] Authentication
   - [x] JWT
-  - [x] OpenID
   - [ ] Mutual TLS
   - [ ] Kerberos
 - [x] Authorization
@@ -184,9 +97,9 @@
 In order to use this chart to deploy Apache Pulsar on Kubernetes, the following are required.

-1. kubectl 1.25 or higher, compatible with your cluster ([+/- 1 minor release from your cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
+1. kubectl 1.23 or higher, compatible with your cluster ([+/- 1 minor release from your cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
 2. Helm v3 (3.12.0 or higher)
-3. A Kubernetes cluster, version 1.25 or higher.
+3. A Kubernetes cluster, version 1.23 or higher.
@ -201,62 +114,26 @@ Before proceeding to deploying Pulsar, you need to prepare your environment.
To add this chart to your local Helm repository: To add this chart to your local Helm repository:
```bash ```bash
helm repo add apachepulsar https://pulsar.apache.org/charts helm repo add apache https://pulsar.apache.org/charts
helm repo update
``` ```
## Kubernetes cluster preparation ## Kubernetes cluster preparation
You need a Kubernetes cluster whose version is 1.25 or higher in order to use this chart, due to the usage of certain Kubernetes features. You need a Kubernetes cluster whose version is 1.23 or higher in order to use this chart, due to the usage of certain Kubernetes features.
We provide some instructions to guide you through the preparation: http://pulsar.apache.org/docs/helm-prepare/ We provide some instructions to guide you through the preparation: http://pulsar.apache.org/docs/helm-prepare/
## Deploy Pulsar to Kubernetes ## Deploy Pulsar to Kubernetes
1. Configure your values file. The best way to know which values are available is to read the [values.yaml](./charts/pulsar/values.yaml). 1. Configure your values file. The best way to know which values are available is to read the [values.yaml](./charts/pulsar/values.yaml).
A best practice is to start with an empty values file and only set the keys that differ from the default configuration.
Anti-affinity rules for Zookeeper and Bookie components require at least one node per replica. For Kubernetes clusters with less than 3 nodes,
you must disable this feature by adding this to your initial values.yaml file:
```yaml
affinity:
anti_affinity: false
```
2. Install the chart: 2. Install the chart:
```bash ```bash
helm install -n <namespace> --create-namespace <release-name> -f your-values.yaml apachepulsar/pulsar helm install <release-name> -n <namespace> -f your-values.yaml apache/pulsar
``` ```
3. Observe the deployment progress 3. Access the Pulsar cluster
Watching events to view progress of deployment:
```shell
kubectl get -n <namespace> events -o wide --watch
```
Watching state of deployed Kubernetes objects, updated every 2 seconds:
```shell
watch kubectl get -n <namespace> all
```
Waiting until Pulsar Proxy is available:
```shell
kubectl wait --timeout=600s --for=condition=ready pod -n <namespace> -l component=proxy
```
Watching state with k9s (https://k9scli.io/topics/install/):
```shell
k9s -n <namespace>
```
4. Access the Pulsar cluster
The default values will create a `ClusterIP` for the proxy you can use to interact with the cluster. To find the IP address of proxy use: The default values will create a `ClusterIP` for the proxy you can use to interact with the cluster. To find the IP address of proxy use:
@ -267,7 +144,7 @@ We provide some instructions to guide you through the preparation: http://pulsar
For more information, please follow our detailed
[quick start guide](https://pulsar.apache.org/docs/getting-started-helm/).
## Customize the deployment

We provide a [detailed guideline](https://pulsar.apache.org/docs/helm-deploy/) for you to customize
the Helm Chart for a production-ready deployment.
@ -283,57 +160,26 @@ You can also checkout out the example values file for different deployments.
- [Deploy a Pulsar cluster with JWT authentication using symmetric key](examples/values-jwt-symmetric.yaml)
- [Deploy a Pulsar cluster with JWT authentication using asymmetric key](examples/values-jwt-asymmetric.yaml)
- ## Disabling victoria-metrics-k8s-stack components
+ ## Disabling Kube-Prometheus-Stack CRDs

- In order to disable the victoria-metrics-k8s-stack, you can add the following to your `values.yaml`.
- Victoria Metrics components can also be disabled and enabled individually if you only need specific monitoring features.
+ In order to disable the kube-prometheus-stack fully, it is necessary to add the following to your `values.yaml`:

```yaml
- # disable VictoriaMetrics and related components
- victoria-metrics-k8s-stack:
-   enabled: false
-   victoria-metrics-operator:
-     enabled: false
-   vmsingle:
-     enabled: false
-   vmagent:
-     enabled: false
-   kube-state-metrics:
-     enabled: false
-   prometheus-node-exporter:
-     enabled: false
-   grafana:
-     enabled: false
-   alertmanager:
-     enabled: false
+ kube-prometheus-stack:
+   enabled: false
+   prometheusOperator:
+     enabled: false
+   grafana:
+     enabled: false
+   alertmanager:
+     enabled: false
+   prometheus:
+     enabled: false
```

- Additionally, you'll need to set each component's `podMonitor` property to `false`.
- ```yaml
- # disable pod monitors
- autorecovery:
-   podMonitor:
-     enabled: false
- bookkeeper:
-   podMonitor:
-     enabled: false
- oxia:
-   server:
-     podMonitor:
-       enabled: false
-   coordinator:
-     podMonitor:
-       enabled: false
- broker:
-   podMonitor:
-     enabled: false
- proxy:
-   podMonitor:
-     enabled: false
- zookeeper:
-   podMonitor:
-     enabled: false
- ```
- This is shown in [examples/values-disable-monitoring.yaml](examples/values-disable-monitoring.yaml).
+ Otherwise, the helm chart installation will attempt to install the CRDs for the kube-prometheus-stack. Additionally,
+ you'll need to disable each of the component's `PodMonitors`. This is shown in some [examples](./examples) and is
+ verified in some [tests](./.ci/clusters).
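If you prefer the command line to editing files, the top-level toggle can also be passed with `--set` (a sketch; on chart versions before 4.0.0 the key is `kube-prometheus-stack.enabled`, as explained in the upgrade notes below):

```shell
# disable the bundled monitoring stack at install or upgrade time
helm upgrade --install -n <namespace> <release-name> apachepulsar/pulsar \
  -f your-values.yaml --set victoria-metrics-k8s-stack.enabled=false
```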
## Pulsar Manager
@ -357,12 +203,12 @@ kubectl get secret -l component=pulsar-manager -o=jsonpath="{.items[0].data.UI_P
## Grafana Dashboards

- The Apache Pulsar Helm Chart uses the `victoria-metrics-k8s-stack` Helm Chart to deploy Grafana.
+ The Apache Pulsar Helm Chart uses the `kube-prometheus-stack` Helm Chart to deploy Grafana.

- There are several ways to configure Grafana dashboards. The default [`values.yaml`](charts/pulsar/values.yaml) comes with examples of Pulsar dashboards which get downloaded from the Apache-2.0 licensed [lhotari/pulsar-grafana-dashboards OSS project](https://github.com/lhotari/pulsar-grafana-dashboards) by URL.
+ There are several ways to configure Grafana dashboards. The default `values.yaml` comes with examples of Pulsar dashboards which get downloaded from the Apache-2.0 licensed [streamnative/apache-pulsar-grafana-dashboard OSS project](https://github.com/streamnative/apache-pulsar-grafana-dashboard) by URL.

- Dashboards can be configured in [`values.yaml`](charts/pulsar/values.yaml) or by adding `ConfigMap` items with the label `grafana_dashboard: "1"`.
+ Dashboards can be configured in `values.yaml` or by adding `ConfigMap` items with the label `grafana_dashboard: "1"`.
- In [`values.yaml`](charts/pulsar/values.yaml), it's possible to include dashboards by URL or by grafana.com dashboard id (`gnetId` and `revision`).
+ In `values.yaml`, it's possible to include dashboards by URL or by grafana.com dashboard id (`gnetId` and `revision`).
Please see the [Grafana Helm chart documentation for importing dashboards](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md#import-dashboards).

You can connect to Grafana by forwarding port 3000
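For example (a sketch; the exact Grafana service name depends on your release name, so list the services first if you are unsure):

```shell
# find the Grafana service deployed by the monitoring subchart
kubectl get services -n <namespace> | grep grafana
# forward local port 3000 to the Grafana service
kubectl port-forward -n <namespace> service/<grafana-service-name> 3000:3000
```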
@ -390,48 +236,53 @@ Once your Pulsar Chart is installed, configuration changes and chart
updates should be done using `helm upgrade`.
```bash
- helm repo add apachepulsar https://pulsar.apache.org/charts
+ helm repo add apache https://pulsar.apache.org/charts
helm repo update
- # If you are using the provided victoria-metrics-k8s-stack for monitoring, this installs or upgrades the required CRDs
- ./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
- # get the existing values.yaml used for the most recent deployment
- helm get values -n <namespace> <pulsar-release-name> > values.yaml
- # upgrade the deployment
- helm upgrade -n <namespace> -f values.yaml <pulsar-release-name> apachepulsar/pulsar
+ helm get values <pulsar-release-name> > pulsar.yaml
+ helm upgrade -f pulsar.yaml \
+   <pulsar-release-name> apache/pulsar
```
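Before applying the upgrade, you can preview the rendered changes with a dry run (nothing is applied to the cluster):

```shell
# render the upgrade without applying it
helm upgrade -n <namespace> -f values.yaml <pulsar-release-name> apachepulsar/pulsar --dry-run
```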
For more detailed information, see our [Upgrading](http://pulsar.apache.org/docs/helm-upgrade/) guide.
- ## Upgrading to Helm chart version 4.2.0 (not released yet)
-
- ### TLS configuration for ZooKeeper has changed
-
- The TLS configuration for ZooKeeper has been changed to fix certificate and private key expiration issues.
- This change impacts configurations that have `tls.enabled` and `tls.zookeeper.enabled` set in `values.yaml`.
- The revised solution requires the `AdditionalCertificateOutputFormats=true` feature gate to be enabled in the `cert-manager` deployment when using cert-manager versions below 1.15.0.
- If you installed `cert-manager` using `./scripts/cert-manager/install-cert-manager.sh`, you can re-run the updated script to set the feature gate. The script currently installs or upgrades cert-manager LTS version 1.12.17, where the feature gate must be explicitly enabled.
-
- ## Upgrading from Helm Chart versions before 4.0.0 to 4.0.0 version and above
-
- ### Pulsar Proxy service's default type has been changed from `LoadBalancer` to `ClusterIP`
-
- Please check the section "External Access Recommendations" for guidance, and also check the security advisory section.
- You will need to configure keys under `proxy.service` in your `values.yaml` to preserve existing functionality since the default has been changed.
-
- ### kube-prometheus-stack replaced with victoria-metrics-k8s-stack
-
- The `kube-prometheus-stack` was replaced with `victoria-metrics-k8s-stack` in Pulsar Helm chart version 4.0.0. The trigger for the change was incompatibilities discovered in testing with the most recent `kube-prometheus-stack` and Prometheus 3.2.1, which failed to scrape Pulsar metrics in certain cases without providing proper error messages or debug information at debug level logging.
- [Victoria Metrics](https://docs.victoriametrics.com/) is Apache 2.0 licensed OSS and a fully compatible drop-in replacement for Prometheus that is fast and efficient.
-
- Before upgrading to Pulsar Helm Chart version 4.0.0, it is recommended to disable kube-prometheus-stack in the original Helm chart version that is used:
-
- ```shell
- # get the existing values.yaml used for the most recent deployment
- helm get values -n <namespace> <pulsar-release-name> > values.yaml
- # disable kube-prometheus-stack in the currently used version before upgrading to Pulsar Helm chart 4.0.0
- helm upgrade -n <namespace> -f values.yaml --version <your-current-chart-version> --set kube-prometheus-stack.enabled=false <pulsar-release-name> apachepulsar/pulsar
- ```
+ ## Upgrading from Helm Chart version 3.0.0-3.6.0 to 3.7.0 version and above
+
+ The kube-prometheus-stack version has been upgraded to 65.x.x in Pulsar Helm Chart version 3.7.0.
+ Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
+ in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-64x-to-65x).
+ There's a script to run the required commands:
+
+ ```shell
+ ./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.77.1
+ ```
+
+ After this, you can proceed with `helm upgrade`.

## Upgrading from Helm Chart version 3.0.0-3.4.x to 3.5.0 version and above

The kube-prometheus-stack version has been upgraded to 59.x.x in Pulsar Helm Chart version 3.5.0.
Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-58x-to-59x).
There's a script to run the required commands:

```shell
./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.74.0
```

After this, you can proceed with `helm upgrade`.

## Upgrading from Helm Chart version 3.0.0-3.2.x to 3.3.0 version and above

The kube-prometheus-stack version has been upgraded to 56.x.x in Pulsar Helm Chart version 3.3.0.
Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-55x-to-56x).
There's a script to run the required commands:

```shell
./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.71.0
```

After this, you can proceed with `helm upgrade`.
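Before running any of these upgrades, it can help to confirm which chart version and which Prometheus Operator CRDs are currently installed (a quick sketch):

```shell
# show the chart version of the current release
helm list -n <namespace>
# list the installed Prometheus Operator CRDs, if any
kubectl get crds | grep monitoring.coreos.com
```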
@ -441,7 +292,7 @@ After, this you can proceed with `helm upgrade`.
The 2.10.0+ Apache Pulsar docker image is a non-root container, by default. That complicates an upgrade to 2.10.0
because the existing files are owned by the root user but are not writable by the root group. In order to leverage this
new security feature, the Bookkeeper and Zookeeper StatefulSet [securityContexts](https://kubernetes.io/docs/tasks/configure-pod-container/security-context)
- are configurable in the [`values.yaml`](charts/pulsar/values.yaml). They default to:
+ are configurable in the `values.yaml`. They default to:
```yaml
securityContext:
@ -489,7 +340,6 @@ Caused by: org.rocksdb.RocksDBException: while open a file for lock: /pulsar/dat
### Recovering from `helm upgrade` error "unable to build kubernetes objects from current release manifest"

Example of the error message:

```bash
Error: UPGRADE FAILED: unable to build kubernetes objects from current release manifest:
[resource mapping not found for name: "pulsar-bookie" namespace: "pulsar" from "":
@ -541,36 +391,6 @@ We've done our best to make these charts as seamless as possible,
occasionally troubles do surface outside of our control. We've collected
tips and tricks for troubleshooting common issues. Please examine these first before raising an [issue](https://github.com/apache/pulsar-helm-chart/issues/new/choose), and feel free to add to them by raising a [Pull Request](https://github.com/apache/pulsar-helm-chart/compare)!
### VictoriaMetrics Troubleshooting
In the example commands, the Kubernetes namespace is `pulsar`; replace it with your deployment's namespace.
#### VictoriaMetrics Web UI
Connecting to the `vmsingle` pod for the web UI:
```shell
kubectl port-forward -n pulsar $(kubectl get pods -n pulsar -l app.kubernetes.io/name=vmsingle -o jsonpath='{.items[0].metadata.name}') 8429:8429
```
Now you can access the UI at http://localhost:8429 and http://localhost:8429/vmui (for a UI similar to the Prometheus UI).
#### VictoriaMetrics Scraping debugging UI - Active Targets
Connecting to the `vmagent` pod for debugging scrape targets:
```shell
kubectl port-forward -n pulsar $(kubectl get pods -n pulsar -l app.kubernetes.io/name=vmagent -o jsonpath='{.items[0].metadata.name}') 8429:8429
```
Now you can access the UI at http://localhost:8429
Active Targets UI
- http://localhost:8429/targets
Scraping Configuration
- http://localhost:8429/config
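While the port-forward above is running, the same information can be fetched from the command line (a sketch; it assumes vmagent's Prometheus-compatible targets API):

```shell
# list the active scrape targets as JSON
curl -s http://localhost:8429/api/v1/targets
```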
## Release Process

See [RELEASE.md](RELEASE.md)
@ -87,7 +87,7 @@ official Apache releases must not include the rcN suffix.
- Tag your release

```shell
- git tag -u $APACHE_USER@apache.org -s pulsar-${VERSION_RC} -m "Apache Pulsar Helm Chart $VERSION_RC"
+ git tag -s pulsar-${VERSION_RC} -m "Apache Pulsar Helm Chart $VERSION_RC"
```

- Tarball the repo
@ -243,16 +243,10 @@ Public keys are available at: https://www.apache.org/dist/pulsar/KEYS
For convenience "index.yaml" has been uploaded (though excluded from voting), so you can also run the below commands.
- helm repo add --force-update apache-pulsar-dist-dev \\
-   https://dist.apache.org/repos/dist/dev/pulsar/helm-chart/$VERSION_RC/
+ helm repo add --force-update apache-pulsar-dist-dev https://dist.apache.org/repos/dist/dev/pulsar/helm-chart/$VERSION_RC/
helm repo update
- helm install pulsar apache-pulsar-dist-dev/pulsar \\
-   --version ${VERSION_WITHOUT_RC} --set affinity.anti_affinity=false \\
-   --wait --timeout 10m --debug
+ helm install pulsar apache-pulsar-dist-dev/pulsar --version ${VERSION_WITHOUT_RC} --set affinity.anti_affinity=false
- For observing the deployment progress, you can use the k9s tool to view the cluster state changes in a different terminal window.
- The k9s tool is available at https://k9scli.io/topics/install/.
pulsar-${VERSION_WITHOUT_RC}.tgz.prov - is also uploaded for verifying Chart Integrity, though it is not strictly required for releasing the artifact based on ASF Guidelines.
You can optionally verify this file using this helm plugin https://github.com/technosophos/helm-gpg, or by using helm --verify (https://helm.sh/docs/helm/helm_verify/).
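For example, the provenance check can be run locally (a sketch; `helm verify` assumes the signer's public key from the KEYS file has been imported into your GnuPG keyring):

```shell
# download the chart together with its .prov file, then verify the signature
helm pull --prov apache-pulsar-dist-dev/pulsar --version ${VERSION_WITHOUT_RC}
helm verify pulsar-${VERSION_WITHOUT_RC}.tgz
```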
@ -410,14 +404,9 @@ Contributors can run below commands to test the Helm Chart
```shell
export VERSION_RC=3.0.0-candidate-1
export VERSION_WITHOUT_RC=${VERSION_RC%-candidate-*}
- ```
- ```shell
- helm repo add --force-update \
-   apache-pulsar-dist-dev https://dist.apache.org/repos/dist/dev/pulsar/helm-chart/$VERSION_RC/
+ helm repo add --force-update apache-pulsar-dist-dev https://dist.apache.org/repos/dist/dev/pulsar/helm-chart/$VERSION_RC/
helm repo update
- helm install pulsar apache-pulsar-dist-dev/pulsar \
-   --version ${VERSION_WITHOUT_RC} --set affinity.anti_affinity=false
+ helm install pulsar apache-pulsar-dist-dev/pulsar --version ${VERSION_WITHOUT_RC} --set affinity.anti_affinity=false
```
You can then perform any other verifications to check that it works as you expected by
@ -490,7 +479,9 @@ Verify that the packages appear in [Pulsar Helm Chart](https://dist.apache.org/r
Create and push the release tag:
```shell
- git tag -u $APACHE_USER@apache.org pulsar-$VERSION_WITHOUT_RC $(git rev-parse pulsar-$VERSION_RC^{}) -m "Apache Pulsar Helm Chart ${VERSION_WITHOUT_RC}"
+ cd "${PULSAR_REPO_ROOT}"
+ git checkout pulsar-${VERSION_RC}
+ git tag -s pulsar-${VERSION_WITHOUT_RC} -m "Apache Pulsar Helm Chart ${VERSION_WITHOUT_RC}"
git push origin pulsar-${VERSION_WITHOUT_RC}
```
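You can check the signature on the tag before pushing it (a quick verification, assuming the signing key is present in your local GnuPG keyring):

```shell
# verify the GPG signature of the release tag
git tag -v pulsar-${VERSION_WITHOUT_RC}
```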
@ -511,7 +502,7 @@ cd pulsar-site
# Run on a branch based on main branch
cd static/charts
# need the chart file temporarily to update the index
- wget https://dist.apache.org/repos/dist/release/pulsar/helm-chart/${VERSION_WITHOUT_RC}/pulsar-${VERSION_WITHOUT_RC}.tgz
+ wget https://downloads.apache.org/pulsar/helm-chart/${VERSION_WITHOUT_RC}/pulsar-${VERSION_WITHOUT_RC}.tgz
# store the license header temporarily
head -n 17 index.yaml > license_header.txt
# update the index
@ -524,29 +515,14 @@ rm license_header.txt index.yaml.new
rm pulsar-${VERSION_WITHOUT_RC}.tgz
```
- Verify that the updated `index.yaml` file has the most recent version.
+ Verify that the updated `index.yaml` file has the most recent version. Then run:
- Wait until the file is available:
- ```shell
- while ! curl -fIL https://downloads.apache.org/pulsar/helm-chart/${VERSION_WITHOUT_RC}/pulsar-${VERSION_WITHOUT_RC}.tgz; do
-   echo "Waiting for pulsar-${VERSION_WITHOUT_RC}.tgz to become available..."
-   sleep 10
- done
- ```
- Then run:

```shell
git add index.yaml
git commit -m "Adding Pulsar Helm Chart ${VERSION_WITHOUT_RC} to index.yaml"
```

- Then commit the change.
- ```
- git push origin main
- ```
+ Then open a PR.
## Create release notes for the tag in GitHub UI ## Create release notes for the tag in GitHub UI
@ -18,11 +18,11 @@
# #
apiVersion: v2
- appVersion: "4.0.5"
+ appVersion: "4.0.2"
description: Apache Pulsar Helm chart for Kubernetes
name: pulsar
- version: 4.1.0
+ version: 3.9.0
- kubeVersion: ">=1.25.0-0"
+ kubeVersion: ">=1.23.0-0"
home: https://pulsar.apache.org
sources:
- https://github.com/apache/pulsar
@ -32,7 +32,7 @@ maintainers:
- name: The Apache Pulsar Team
  email: dev@pulsar.apache.org
dependencies:
- - name: victoria-metrics-k8s-stack
-   version: 0.38.x
-   repository: https://victoriametrics.github.io/helm-charts/
-   condition: victoria-metrics-k8s-stack.enabled
+ - name: kube-prometheus-stack
+   version: 65.x.x
+   repository: https://prometheus-community.github.io/helm-charts
+   condition: kube-prometheus-stack.enabled
@ -1,185 +1,18 @@
Thank you for installing Apache Pulsar Helm chart version {{ .Chart.Version }}.

!! WARNING !!

Important Security Disclaimer for Apache Pulsar Helm Chart Usage:

This Helm chart is provided with a default configuration that does not
meet the security requirements for production environments or sensitive
data handling. Users are strongly advised to thoroughly review and
customize the security settings to ensure a secure deployment that
aligns with their specific operational and security policies.

Go to https://github.com/apache/pulsar-helm-chart for more details.

Ask usage questions at https://github.com/apache/pulsar/discussions/categories/q-a
Report issues to https://github.com/apache/pulsar-helm-chart/issues
Please contribute improvements to https://github.com/apache/pulsar-helm-chart

======================================================================================
APACHE PULSAR HELM CHART
======================================================================================

======================================================================================
SECURITY ADVISORY
======================================================================================

This Helm chart's default configuration DOES NOT meet production security requirements.
Users MUST review and customize security settings for their specific environment.

IMPORTANT: This Helm chart provides a starting point for Pulsar deployments but requires
significant security customization before use in production environments. We strongly
recommend implementing:

1. Proper network isolation and access controls
2. Authentication and authorization for all components
3. TLS encryption for all communication channels
4. Regular security updates and vulnerability assessments

As an open source project, we welcome contributions to improve security features.
Please consider submitting pull requests to address security gaps or enhance
existing security implementations.
---------------------------------------------------------------------------------------
SECURITY NOTICE: The Pulsar proxy is not designed for direct public internet exposure.
It lacks security features required for untrusted networks and should only be deployed
within secured environments with proper network controls.
IMPORTANT CHANGE IN v4.0.0: Default service type changed from LoadBalancer to ClusterIP
for security reasons. This limits access to within the Kubernetes environment by default.
---------------------------------------------------------------------------------------
IF YOU NEED EXTERNAL ACCESS FOR YOUR PULSAR CLUSTER:
---------------------------------------------------------------------------------------
Note: This information might be outdated. Please go to https://github.com/apache/pulsar-helm-chart for updated information.
If you need to expose the Pulsar Proxy outside the cluster using a LoadBalancer service type:
1. USE INTERNAL LOAD BALANCERS ONLY
- Set type to LoadBalancer only in secured environments with proper network controls
- Add cloud provider-specific annotations for internal load balancers
- See cloud provider documentation:
* AWS / EKS: https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/
* Azure / AKS: https://learn.microsoft.com/en-us/azure/aks/internal-lb
* GCP / GKE: https://cloud.google.com/kubernetes-engine/docs/concepts/service-load-balancer-parameters
- Examples (verify correctness for your environment):
* AWS / EKS: service.beta.kubernetes.io/aws-load-balancer-internal: "true"
* Azure / AKS: service.beta.kubernetes.io/azure-load-balancer-internal: "true"
* GCP / GKE: networking.gke.io/load-balancer-type: "Internal"
2. IMPLEMENT AUTHENTICATION AND AUTHORIZATION
- Configure all clients to authenticate properly
- Set up appropriate authorization policies
3. USE TLS FOR ALL CONNECTIONS
- Enable TLS for client-to-proxy connections
- Enable TLS for proxy-to-broker connections
- Enable TLS for all internal cluster communications (brokers, zookeepers, bookies)
- Note: TLS alone is NOT sufficient as a security solution in Pulsar. Even with TLS enabled,
clusters exposed to untrusted networks remain vulnerable to denial-of-service attacks,
authentication bypass attempts, and protocol-level exploits. Always implement defense-in-depth
security measures and limit exposure to trusted networks only.
4. NETWORK SECURITY
- Use private networks (VPCs)
- Configure firewalls, security groups, and IP restrictions appropriately
- In addition, consider using loadBalancerSourceRanges to limit access to specific IP ranges
5. CLIENT IP ADDRESS BASED ACCESS RESTRICTIONS
- When using a LoadBalancer service type, restrict access to specific IP ranges by configuring
`proxy.service.loadBalancerSourceRanges` in your values.yaml
- Important: This should be implemented alongside other security measures (internal load balancer,
authentication, TLS, network policies) as part of a defense-in-depth strategy,
not as a standalone security solution (see the sketch after this list)
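As an illustration, several of the points above can be combined into chart values set from the command line (a sketch only; the AWS annotation is quoted from the examples in point 1 and the CIDR is a placeholder, so verify both for your environment):

```shell
# expose the proxy through an internal load balancer on AWS / EKS
# and restrict client access to a private CIDR range (placeholder values)
helm upgrade --install -n <namespace> <release-name> apachepulsar/pulsar \
  --set proxy.service.type=LoadBalancer \
  --set-string 'proxy.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-internal=true' \
  --set 'proxy.service.loadBalancerSourceRanges={10.0.0.0/8}'
```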
---------------------------------------------------------------------------------------
ALTERNATIVE FOR EXTERNAL ACCESS
---------------------------------------------------------------------------------------
As an alternative method for external access, Pulsar has support for SNI proxy routing:
https://pulsar.apache.org/docs/next/concepts-proxy-sni-routing/
SNI Proxy routing is supported with proxy servers such as Apache Traffic Server, HAProxy and Nginx.
Note: This option isn't currently implemented in the Apache Pulsar Helm chart.
IMPORTANT: Pulsar binary protocol cannot be exposed outside of the Kubernetes cluster
using Kubernetes Ingress. Kubernetes Ingress works for the Admin REST API and topic lookups,
but clients would be connecting to the advertised listener addresses returned by the brokers and it
would only work when clients can connect directly to brokers. This is not a supported secure option
for exposing Pulsar to untrusted networks.
{{- if .Values.useReleaseStatus }}
======================================================================================
🚀 QUICK START 🚀
======================================================================================
Watching events to view the progress of the deployment:
kubectl get -n {{ .Values.namespace | default .Release.Namespace }} events -o wide --watch
Watching state of deployed Kubernetes objects, updated every 2 seconds:
watch kubectl get -n {{ .Values.namespace | default .Release.Namespace }} all
{{- if .Values.components.proxy }}
Waiting until Pulsar Proxy is available:
kubectl wait --timeout=600s --for=condition=ready pod -n {{ .Values.namespace | default .Release.Namespace }} -l component=proxy
{{- end }}
Watching state with k9s (https://k9scli.io/topics/install/):
k9s -n {{ .Values.namespace | default .Release.Namespace }}
{{- if and .Values.affinity.anti_affinity (or (gt (int .Values.bookkeeper.replicaCount) 1) (gt (int .Values.zookeeper.replicaCount) 1)) }}
======================================================================================
⚠️ NOTICE FOR DEV K8S CLUSTER USERS ⚠️
======================================================================================
Please note that anti-affinity rules for Zookeeper and Bookie components require at least
one node per replica. There are currently {{ .Values.bookkeeper.replicaCount }} bookies and {{ .Values.zookeeper.replicaCount }} zookeepers configured.
For Kubernetes clusters with fewer than 3 nodes, such as single-node Kubernetes clusters in
development environments like minikube, Docker Desktop, Rancher Desktop (k3s), or Podman
Desktop, you must disable the anti-affinity feature by either:
Adding to your values.yaml:
affinity:
anti_affinity: false
Or adding "--set affinity.anti_affinity=false" to the helm command line.
After making the changes to your values yaml file, redeploy with "helm upgrade":
helm upgrade -n {{ .Release.Namespace }} -f your_values_file.yaml {{ .Release.Name }} apachepulsar/pulsar
These configuration instructions can be omitted for Kubernetes clusters with 3 or more nodes.
{{- end }}
{{- end }}
{{- if and (eq .Values.proxy.service.type "LoadBalancer") (not .Values.proxy.service.annotations) }}
======================================================================================
⚠️ 🚨 INSECURE CONFIGURATION DETECTED 🚨 ⚠️
======================================================================================
WARNING: You are using a LoadBalancer service type without internal load balancer
annotations. This is potentially an insecure configuration. Please carefully review
the security recommendations above and visit https://github.com/apache/pulsar-helm-chart
for more information.
======================================================================================
{{- end }}
======================================================================================
DISCLAIMER
======================================================================================
The providers of this Helm chart make no guarantees regarding the security of the chart under
any circumstances. It is the user's responsibility to ensure that their deployment is secure
and complies with all relevant security standards and regulations.
By using this Helm chart, the user acknowledges the risks associated with its default
configuration and the necessity for proper security customization. The user further
agrees that the providers of the Helm chart shall not be liable for any security breaches
or incidents resulting from the use of the chart.
The user assumes full responsibility for the security and integrity of their deployment.
This includes, but is not limited to, the proper configuration of security features and
adherence to best practices for securing network access. The providers of this Helm chart
disclaim all warranties, whether express or implied, including any warranties of
merchantability, fitness for a particular purpose, and non-infringement of third-party rights.
======================================================================================
RESOURCES
======================================================================================
- 🖥️ Install k9s terminal interface for viewing and managing k8s clusters: https://k9scli.io/topics/install/
- ❓ Usage Questions: https://github.com/apache/pulsar/discussions/categories/q-a
- 🐛 Report Issues: https://github.com/apache/pulsar-helm-chart/issues
- 🔒 Security Issues: https://pulsar.apache.org/security/
- 📚 Documentation: https://github.com/apache/pulsar-helm-chart
🌟 Please contribute to improve the Apache Pulsar Helm chart and its documentation:
- 🤝 Contribute: https://github.com/apache/pulsar-helm-chart
Thank you for installing Apache Pulsar Helm chart version {{ .Chart.Version }}.
@ -36,7 +36,7 @@ Define autorecovery zookeeper client tls settings
*/}}
{{- define "pulsar.autorecovery.zookeeper.tls.settings" -}}
{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
- {{- include "pulsar.component.zookeeper.tls.settings" (dict "component" "autorecovery" "isClient" true "isCacerts" .Values.tls.autorecovery.cacerts.enabled) -}}
+ /pulsar/keytool/keytool.sh autorecovery {{ template "pulsar.autorecovery.hostname" . }} true;
{{- end }}
{{- end }}
@ -51,21 +51,11 @@ Define autorecovery tls certs mounts
- name: ca
  mountPath: "/pulsar/certs/ca"
  readOnly: true
{{- if .Values.tls.zookeeper.enabled }}
- name: keytool
mountPath: "/pulsar/keytool/keytool.sh"
subPath: keytool.sh
{{- end }} {{- end }}
{{- if .Values.tls.autorecovery.cacerts.enabled }}
- mountPath: "/pulsar/certs/cacerts"
name: autorecovery-cacerts
{{- range $cert := .Values.tls.autorecovery.cacerts.certs }}
- name: {{ $cert.name }}
mountPath: "/pulsar/certs/{{ $cert.name }}"
readOnly: true
{{- end }}
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem.sh"
subPath: certs-combine-pem.sh
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
subPath: certs-combine-pem-infinity.sh
{{- end }} {{- end }}
{{- end }} {{- end }}
@ -82,34 +72,25 @@ Define autorecovery tls certs volumes
path: tls.crt path: tls.crt
- key: tls.key - key: tls.key
path: tls.key path: tls.key
- key: tls-combined.pem
path: tls-combined.pem
- name: ca - name: ca
secret: secret:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}" {{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
{{- end }}
{{- if eq .Values.certs.internal_issuer.type "ca" }}
secretName: "{{ .Values.certs.issuers.ca.secretName }}"
{{- end }}
items: items:
- key: ca.crt - key: ca.crt
path: ca.crt path: ca.crt
{{- end }} {{- if .Values.tls.zookeeper.enabled }}
{{- if .Values.tls.autorecovery.cacerts.enabled }} - name: keytool
- name: autorecovery-cacerts
emptyDir: {}
{{- range $cert := .Values.tls.autorecovery.cacerts.certs }}
- name: {{ $cert.name }}
secret:
secretName: "{{ $cert.existingSecret }}"
items:
{{- range $key := $cert.secretKeys }}
- key: {{ $key }}
path: {{ $key }}
{{- end }}
{{- end }}
- name: certs-scripts
configMap: configMap:
name: "{{ template "pulsar.fullname" . }}-certs-scripts" name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
defaultMode: 0755 defaultMode: 0755
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}
{{/*
Define autorecovery init container : verify cluster id
@ -117,7 +98,7 @@ Define autorecovery init container : verify cluster id
{{- define "pulsar.autorecovery.init.verify_cluster_id" -}}
bin/apply-config-from-env.py conf/bookkeeper.conf;
export BOOKIE_MEM="-Xmx128M";
- {{- include "pulsar.autorecovery.zookeeper.tls.settings" . }}
+ {{- include "pulsar.autorecovery.zookeeper.tls.settings" . -}}
until timeout 15 bin/bookkeeper shell whatisinstanceid; do
  sleep 3;
done;
@ -37,7 +37,7 @@ Define bookie zookeeper client tls settings
*/}}
{{- define "pulsar.bookkeeper.zookeeper.tls.settings" -}}
{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
- {{- include "pulsar.component.zookeeper.tls.settings" (dict "component" "bookie" "isClient" true "isCacerts" .Values.tls.bookie.cacerts.enabled) -}}
+ /pulsar/keytool/keytool.sh bookie {{ template "pulsar.bookkeeper.hostname" . }} true;
{{- end }}
{{- end }}
@ -45,30 +45,18 @@ Define bookie zookeeper client tls settings
Define bookie tls certs mounts
*/}}
{{- define "pulsar.bookkeeper.certs.volumeMounts" -}} {{- define "pulsar.bookkeeper.certs.volumeMounts" -}}
{{- if .Values.tls.enabled }} {{- if and .Values.tls.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled) }}
{{- if or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled }}
- name: bookie-certs
  mountPath: "/pulsar/certs/bookie"
  readOnly: true
{{- end }}
- name: ca
  mountPath: "/pulsar/certs/ca"
  readOnly: true
{{- if .Values.tls.zookeeper.enabled }}
- name: keytool
mountPath: "/pulsar/keytool/keytool.sh"
subPath: keytool.sh
{{- end }} {{- end }}
{{- if .Values.tls.bookie.cacerts.enabled }}
- mountPath: "/pulsar/certs/cacerts"
name: bookie-cacerts
{{- range $cert := .Values.tls.bookie.cacerts.certs }}
- name: {{ $cert.name }}
mountPath: "/pulsar/certs/{{ $cert.name }}"
readOnly: true
{{- end }}
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem.sh"
subPath: certs-combine-pem.sh
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
subPath: certs-combine-pem-infinity.sh
{{- end }} {{- end }}
{{- end }} {{- end }}
@ -76,8 +64,7 @@ Define bookie tls certs mounts
Define bookie tls certs volumes
*/}}
{{- define "pulsar.bookkeeper.certs.volumes" -}} {{- define "pulsar.bookkeeper.certs.volumes" -}}
{{- if .Values.tls.enabled }} {{- if and .Values.tls.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled) }}
{{- if or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled }}
- name: bookie-certs - name: bookie-certs
secret: secret:
secretName: "{{ .Release.Name }}-{{ .Values.tls.bookie.cert_name }}" secretName: "{{ .Release.Name }}-{{ .Values.tls.bookie.cert_name }}"
@ -86,67 +73,36 @@ Define bookie tls certs volumes
path: tls.crt path: tls.crt
- key: tls.key - key: tls.key
path: tls.key path: tls.key
{{- if .Values.tls.zookeeper.enabled }}
- key: tls-combined.pem
path: tls-combined.pem
{{- end }}
{{- end }}
- name: ca - name: ca
secret: secret:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}" {{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
{{- end }}
{{- if eq .Values.certs.internal_issuer.type "ca" }}
secretName: "{{ .Values.certs.issuers.ca.secretName }}"
{{- end }}
items: items:
- key: ca.crt - key: ca.crt
path: ca.crt path: ca.crt
{{- end }} {{- if .Values.tls.zookeeper.enabled }}
{{- if .Values.tls.bookie.cacerts.enabled }} - name: keytool
- name: bookie-cacerts
emptyDir: {}
{{- range $cert := .Values.tls.bookie.cacerts.certs }}
- name: {{ $cert.name }}
secret:
secretName: "{{ $cert.existingSecret }}"
items:
{{- range $key := $cert.secretKeys }}
- key: {{ $key }}
path: {{ $key }}
{{- end }}
{{- end }}
- name: certs-scripts
configMap: configMap:
name: "{{ template "pulsar.fullname" . }}-certs-scripts" name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
defaultMode: 0755 defaultMode: 0755
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}
{{/* {{/*
Define bookie common config Define bookie common config
*/}} */}}
{{- define "pulsar.bookkeeper.config.common" -}}
- {{/*
- Configure BookKeeper's metadata store (available since BookKeeper 4.7.0 / BP-29)
- https://bookkeeper.apache.org/bps/BP-29-metadata-store-api-module/
- https://bookkeeper.apache.org/docs/deployment/manual#cluster-metadata-setup
- */}}
- # Set empty values for zkServers and zkLedgersRootPath since we're using the metadataServiceUri to configure BookKeeper's metadata store
- zkServers: ""
- zkLedgersRootPath: ""
{{- if .Values.components.zookeeper }}
- {{- if (and (hasKey .Values.pulsar_metadata "bookkeeper") .Values.pulsar_metadata.bookkeeper.usePulsarMetadataBookieDriver) }}
- # there's a bug when using PulsarMetadataBookieDriver since it always appends /ledgers to the metadataServiceUri
- # Possibly a bug in org.apache.pulsar.metadata.bookkeeper.AbstractMetadataDriver#resolveLedgersRootPath in Pulsar code base
- metadataServiceUri: "metadata-store:zk:{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}"
- {{- else }}
- # use zk+hierarchical:// when using BookKeeper's built-in metadata driver
- metadataServiceUri: "zk+hierarchical://{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}/ledgers"
- {{- end }}
+ zkServers: "{{ template "pulsar.zookeeper.connect" . }}"
+ zkLedgersRootPath: "{{ .Values.metadataPrefix }}/ledgers"
{{- else if .Values.components.oxia }}
metadataServiceUri: "{{ template "pulsar.oxia.metadata.url.bookkeeper" . }}"
{{- end }}
- {{- /* metadataStoreSessionTimeoutMillis maps to zkTimeout in bookkeeper.conf for both zookeeper and oxia metadata stores */}}
- {{- if (and (hasKey .Values.pulsar_metadata "bookkeeper") (hasKey .Values.pulsar_metadata.bookkeeper "metadataStoreSessionTimeoutMillis")) }}
- zkTimeout: "{{ .Values.pulsar_metadata.bookkeeper.metadataStoreSessionTimeoutMillis }}"
- {{- end }}
# enable bookkeeper http server
httpServerEnabled: "true"
httpServerPort: "{{ .Values.bookkeeper.ports.http }}"
@ -166,7 +122,7 @@ PULSAR_PREFIX_tlsCertificatePath: /pulsar/certs/bookie/tls.crt
PULSAR_PREFIX_tlsKeyStoreType: PEM
PULSAR_PREFIX_tlsKeyStore: /pulsar/certs/bookie/tls.key
PULSAR_PREFIX_tlsTrustStoreType: PEM
- PULSAR_PREFIX_tlsTrustStore: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.bookie.cacerts.enabled | quote }}
+ PULSAR_PREFIX_tlsTrustStore: /pulsar/certs/ca/ca.crt
{{- end }}
{{- end }}
@ -177,7 +133,7 @@ Define bookie init container : verify cluster id
{{- if not (and .Values.volumes.persistence .Values.bookkeeper.volumes.persistence) }}
bin/apply-config-from-env.py conf/bookkeeper.conf;
export BOOKIE_MEM="-Xmx128M";
- {{- include "pulsar.bookkeeper.zookeeper.tls.settings" . }}
+ {{- include "pulsar.bookkeeper.zookeeper.tls.settings" . -}}
until timeout 15 bin/bookkeeper shell whatisinstanceid; do
  sleep 3;
done;
@ -187,7 +143,7 @@ bin/bookkeeper shell bookieformat -nonInteractive -force -deleteCookie || true
set -e;
bin/apply-config-from-env.py conf/bookkeeper.conf;
export BOOKIE_MEM="-Xmx128M";
- {{- include "pulsar.bookkeeper.zookeeper.tls.settings" . }}
+ {{- include "pulsar.bookkeeper.zookeeper.tls.settings" . -}}
until timeout 15 bin/bookkeeper shell whatisinstanceid; do
  sleep 3;
done;
@ -43,7 +43,7 @@ Define broker zookeeper client tls settings
*/}}
{{- define "pulsar.broker.zookeeper.tls.settings" -}}
{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
- {{- include "pulsar.component.zookeeper.tls.settings" (dict "component" "broker" "isClient" true "isCacerts" .Values.tls.broker.cacerts.enabled) -}}
+ /pulsar/keytool/keytool.sh broker {{ template "pulsar.broker.hostname" . }} true;
{{- end }}
{{- end }}
@ -51,30 +51,18 @@ Define broker zookeeper client tls settings
Define broker tls certs mounts
*/}}
{{- define "pulsar.broker.certs.volumeMounts" -}} {{- define "pulsar.broker.certs.volumeMounts" -}}
{{- if .Values.tls.enabled }} {{- if and .Values.tls.enabled (or .Values.tls.broker.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled)) }}
{{- if or .Values.tls.broker.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled) }}
- name: broker-certs
  mountPath: "/pulsar/certs/broker"
  readOnly: true
{{- end }}
- name: ca
  mountPath: "/pulsar/certs/ca"
  readOnly: true
{{- if .Values.tls.zookeeper.enabled }}
- name: keytool
mountPath: "/pulsar/keytool/keytool.sh"
subPath: keytool.sh
{{- end }} {{- end }}
{{- if .Values.tls.broker.cacerts.enabled }}
- mountPath: "/pulsar/certs/cacerts"
name: broker-cacerts
{{- range $cert := .Values.tls.broker.cacerts.certs }}
- name: {{ $cert.name }}
mountPath: "/pulsar/certs/{{ $cert.name }}"
readOnly: true
{{- end }}
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem.sh"
subPath: certs-combine-pem.sh
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
subPath: certs-combine-pem-infinity.sh
{{- end }} {{- end }}
{{- end }} {{- end }}
@ -82,8 +70,7 @@ Define broker tls certs mounts
Define broker tls certs volumes
*/}}
{{- define "pulsar.broker.certs.volumes" -}} {{- define "pulsar.broker.certs.volumes" -}}
{{- if .Values.tls.enabled }} {{- if and .Values.tls.enabled (or .Values.tls.broker.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled)) }}
{{- if or .Values.tls.broker.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled) }}
- name: broker-certs - name: broker-certs
secret: secret:
secretName: "{{ .Release.Name }}-{{ .Values.tls.broker.cert_name }}" secretName: "{{ .Release.Name }}-{{ .Values.tls.broker.cert_name }}"
@ -92,34 +79,22 @@ Define broker tls certs volumes
path: tls.crt path: tls.crt
- key: tls.key - key: tls.key
path: tls.key path: tls.key
{{- if .Values.tls.zookeeper.enabled }}
- key: tls-combined.pem
path: tls-combined.pem
{{- end }}
{{- end }}
- name: ca - name: ca
secret: secret:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}" {{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
{{- end }}
{{- if eq .Values.certs.internal_issuer.type "ca" }}
secretName: "{{ .Values.certs.issuers.ca.secretName }}"
{{- end }}
items: items:
- key: ca.crt - key: ca.crt
path: ca.crt path: ca.crt
{{- end }} {{- if .Values.tls.zookeeper.enabled }}
{{- if .Values.tls.broker.cacerts.enabled }} - name: keytool
- name: broker-cacerts
emptyDir: {}
{{- range $cert := .Values.tls.broker.cacerts.certs }}
- name: {{ $cert.name }}
secret:
secretName: "{{ $cert.existingSecret }}"
items:
{{- range $key := $cert.secretKeys }}
- key: {{ $key }}
path: {{ $key }}
{{- end }}
{{- end }}
- name: certs-scripts
configMap: configMap:
name: "{{ template "pulsar.fullname" . }}-certs-scripts" name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
defaultMode: 0755 defaultMode: 0755
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}
@ -1,132 +0,0 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{/*
Define the pulsar certs ca issuer name
*/}}
{{- define "pulsar.certs.issuers.ca.name" -}}
{{- if .Values.certs.internal_issuer.enabled -}}
{{- if and (eq .Values.certs.internal_issuer.type "selfsigning") .Values.certs.issuers.selfsigning.name -}}
{{- .Values.certs.issuers.selfsigning.name -}}
{{- else if and (eq .Values.certs.internal_issuer.type "ca") .Values.certs.issuers.ca.name -}}
{{- .Values.certs.issuers.ca.name -}}
{{- else -}}
{{- template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer
{{- end -}}
{{- else -}}
{{- if .Values.certs.issuers.ca.name -}}
{{- .Values.certs.issuers.ca.name -}}
{{- else -}}
{{- fail "certs.issuers.ca.name is required when TLS is enabled and certs.internal_issuer.enabled is false" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Define the pulsar certs ca issuer secret name
*/}}
{{- define "pulsar.certs.issuers.ca.secretName" -}}
{{- if .Values.certs.internal_issuer.enabled -}}
{{- if and (eq .Values.certs.internal_issuer.type "selfsigning") .Values.certs.issuers.selfsigning.secretName -}}
{{- .Values.certs.issuers.selfsigning.secretName -}}
{{- else if and (eq .Values.certs.internal_issuer.type "ca") .Values.certs.issuers.ca.secretName -}}
{{- .Values.certs.issuers.ca.secretName -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name .Values.tls.ca_suffix -}}
{{- end -}}
{{- else -}}
{{- if .Values.certs.issuers.ca.secretName -}}
{{- .Values.certs.issuers.ca.secretName -}}
{{- else -}}
{{- fail "certs.issuers.ca.secretName is required when TLS is enabled and certs.internal_issuer.enabled is false" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common certificate template
Usage: {{- include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.proxy "tlsConfig" .Values.tls.proxy) -}}
*/}}
{{- define "pulsar.cert.template" -}}
{{- if eq .root.Values.certs.internal_issuer.apiVersion "cert-manager.io/v1beta1" -}}
{{- fail "cert-manager.io/v1beta1 is no longer supported. Please set certs.internal_issuer.apiVersion to cert-manager.io/v1" -}}
{{- end -}}
apiVersion: "{{ .root.Values.certs.internal_issuer.apiVersion }}"
kind: Certificate
metadata:
name: "{{ template "pulsar.fullname" .root }}-{{ .tlsConfig.cert_name }}"
namespace: {{ template "pulsar.namespace" .root }}
labels:
{{- include "pulsar.standardLabels" .root | nindent 4 }}
spec:
# Secret names are always required.
secretName: "{{ .root.Release.Name }}-{{ .tlsConfig.cert_name }}"
{{- if .root.Values.tls.zookeeper.enabled }}
additionalOutputFormats:
- type: CombinedPEM
{{- end }}
duration: "{{ .root.Values.tls.common.duration }}"
renewBefore: "{{ .root.Values.tls.common.renewBefore }}"
subject:
organizations:
{{ toYaml .root.Values.tls.common.organization | indent 4 }}
# The use of the common name field has been deprecated since 2000 and is
# discouraged from being used.
commonName: "{{ template "pulsar.fullname" .root }}-{{ .componentConfig.component }}"
isCA: false
privateKey:
size: {{ .root.Values.tls.common.keySize }}
algorithm: {{ .root.Values.tls.common.keyAlgorithm }}
encoding: {{ .root.Values.tls.common.keyEncoding }}
usages:
- server auth
- client auth
# At least one of a DNS Name, URI SAN, or IP address is required.
dnsNames:
{{- if .tlsConfig.dnsNames }}
{{ toYaml .tlsConfig.dnsNames | indent 4 }}
{{- end }}
- {{ printf "*.%s-%s.%s.svc.%s" (include "pulsar.fullname" .root) .componentConfig.component (include "pulsar.namespace" .root) .root.Values.clusterDomain | quote }}
- {{ printf "%s-%s" (include "pulsar.fullname" .root) .componentConfig.component | quote }}
# Issuer references are always required.
issuerRef:
name: "{{ template "pulsar.certs.issuers.ca.name" .root }}"
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: Issuer
# This is optional since cert-manager will default to this value however
# if you are using an external issuer, change this to that issuer group.
group: cert-manager.io
{{- end -}}
{{/*
CA certificates template
Usage: {{ include "pulsar.certs.cacerts" (dict "certs" .Values.tls.<component>.cacerts.certs) }}
*/}}
{{- define "pulsar.certs.cacerts" -}}
{{- $certs := .certs -}}
{{- $cacerts := list -}}
{{- $cacerts = print "/pulsar/certs/ca/ca.crt" | append $cacerts -}}
{{- range $cert := $certs -}}
{{- range $key := $cert.secretKeys -}}
{{- $cacerts = print "/pulsar/certs/" $cert.name "/" $key | append $cacerts -}}
{{- end -}}
{{- end -}}
{{ join " " $cacerts }}
{{- end -}}
@ -1,97 +0,0 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{- define "pulsar.podMonitor" -}}
{{- $root := index . 0 }}
{{- $component := index . 1 }}
{{- $matchLabel := index . 2 }}
{{- $portName := "http" }}
{{- if gt (len .) 3 }}
{{- $portName = index . 3 }}
{{- end }}
{{/* Extract component parts for nested values */}}
{{- $componentParts := splitList "." $component }}
{{- $valuesPath := $root.Values }}
{{- range $componentParts }}
{{- $valuesPath = index $valuesPath . }}
{{- end }}
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
{{- else }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
{{- end }}
metadata:
name: {{ template "pulsar.fullname" $root }}-{{ replace "." "-" $component }}
labels:
{{- include "pulsar.standardLabels" $root | nindent 4 }}
spec:
jobLabel: {{ replace "." "-" $component }}
podMetricsEndpoints:
- port: {{ $portName }}
path: /metrics
scheme: http
interval: {{ $valuesPath.podMonitor.interval }}
scrapeTimeout: {{ $valuesPath.podMonitor.scrapeTimeout }}
# Set honor labels to true to allow overriding namespace label with Pulsar's namespace label
honorLabels: true
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
relabelConfigs:
{{- else }}
relabelings:
{{- end }}
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_label_component]
action: replace
targetLabel: job
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: kubernetes_pod_name
{{- if or $valuesPath.podMonitor.metricRelabelings (and $valuesPath.podMonitor.dropUnderscoreCreatedMetrics (index $valuesPath.podMonitor.dropUnderscoreCreatedMetrics "enabled")) }}
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
metricRelabelConfigs:
{{- else }}
metricRelabelings:
{{- end }}
{{- if and $valuesPath.podMonitor.dropUnderscoreCreatedMetrics (index $valuesPath.podMonitor.dropUnderscoreCreatedMetrics "enabled") }}
# Drop metrics that end with _created, auto-created by metrics library to match OpenMetrics format
- sourceLabels: [__name__]
{{- if and (hasKey $valuesPath.podMonitor.dropUnderscoreCreatedMetrics "excludePatterns") $valuesPath.podMonitor.dropUnderscoreCreatedMetrics.excludePatterns }}
regex: "(?!{{ $valuesPath.podMonitor.dropUnderscoreCreatedMetrics.excludePatterns | join "|" }}).*_created$"
{{- else }}
regex: ".*_created$"
{{- end }}
action: drop
{{- end }}
{{- with $valuesPath.podMonitor.metricRelabelings }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}
selector:
matchLabels:
{{- include "pulsar.matchLabels" $root | nindent 6 }}
{{ $matchLabel }}
{{- end -}}
@ -106,11 +106,7 @@ Define coordinator entrypoint
{{- define "oxia.coordinator.entrypoint" -}}
- "oxia"
- "coordinator"
{{- if .Values.oxia.coordinator.customConfigMapName }}
- "--conf=configmap:{{ template "pulsar.namespace" . }}/{{ .Values.oxia.coordinator.customConfigMapName }}"
{{- else }}
- "--conf=configmap:{{ template "pulsar.namespace" . }}/{{ template "pulsar.fullname" . }}-{{ .Values.oxia.component }}-coordinator"
{{- end }}
- "--log-json"
- "--metadata=configmap"
- "--k8s-namespace={{ template "pulsar.namespace" . }}"
@ -1,95 +0,0 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{/*
Define proxy tls certs mounts
*/}}
{{- define "pulsar.proxy.certs.volumeMounts" -}}
{{- if .Values.tls.enabled }}
{{- if .Values.tls.proxy.enabled }}
- mountPath: "/pulsar/certs/proxy"
name: proxy-certs
readOnly: true
{{- end }}
- mountPath: "/pulsar/certs/ca"
name: ca
readOnly: true
{{- end }}
{{- if .Values.tls.proxy.cacerts.enabled }}
- mountPath: "/pulsar/certs/cacerts"
name: proxy-cacerts
{{- range $cert := .Values.tls.proxy.cacerts.certs }}
- name: {{ $cert.name }}
mountPath: "/pulsar/certs/{{ $cert.name }}"
readOnly: true
{{- end }}
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem.sh"
subPath: certs-combine-pem.sh
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
subPath: certs-combine-pem-infinity.sh
{{- end }}
{{- end }}
{{/*
Define proxy tls certs volumes
*/}}
{{- define "pulsar.proxy.certs.volumes" -}}
{{- if .Values.tls.enabled }}
{{- if .Values.tls.proxy.enabled }}
- name: proxy-certs
secret:
secretName: "{{ .Release.Name }}-{{ .Values.tls.proxy.cert_name }}"
items:
- key: tls.crt
path: tls.crt
- key: tls.key
path: tls.key
{{- if .Values.tls.zookeeper.enabled }}
- key: tls-combined.pem
path: tls-combined.pem
{{- end }}
{{- end }}
- name: ca
secret:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}"
items:
- key: ca.crt
path: ca.crt
{{- end }}
{{- if .Values.tls.proxy.cacerts.enabled }}
- name: proxy-cacerts
emptyDir: {}
{{- range $cert := .Values.tls.proxy.cacerts.certs }}
- name: {{ $cert.name }}
secret:
secretName: "{{ $cert.existingSecret }}"
items:
{{- range $key := $cert.secretKeys }}
- key: {{ $key }}
path: {{ $key }}
{{- end }}
{{- end }}
- name: certs-scripts
configMap:
name: "{{ template "pulsar.fullname" . }}-certs-scripts"
defaultMode: 0755
{{- end }}
{{- end }}
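
The removed helper pair is driven by a `tls.proxy.cacerts` values block of roughly this shape (a sketch; the secret names are invented for illustration, and `secretKeys` lists the keys inside each existing Secret that the mounted `certs-combine-pem.sh` scripts concatenate into `ca-combined.pem`):

```yaml
tls:
  proxy:
    cacerts:
      enabled: true
      certs:
        - name: corporate-ca            # mounted at /pulsar/certs/corporate-ca
          existingSecret: corporate-ca-secret
          secretKeys:
            - ca.crt
```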


@@ -36,7 +36,7 @@ Define toolset zookeeper client tls settings
 */}}
 {{- define "pulsar.toolset.zookeeper.tls.settings" -}}
 {{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled -}}
-{{- include "pulsar.component.zookeeper.tls.settings" (dict "component" "toolset" "isClient" true "isCacerts" .Values.tls.toolset.cacerts.enabled) -}}
+/pulsar/keytool/keytool.sh toolset {{ template "pulsar.toolset.hostname" . }} true;
 {{- end -}}
 {{- end }}
@@ -44,30 +44,18 @@ Define toolset zookeeper client tls settings
 Define toolset tls certs mounts
 */}}
 {{- define "pulsar.toolset.certs.volumeMounts" -}}
-{{- if .Values.tls.enabled }}
-{{- if .Values.tls.zookeeper.enabled }}
+{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
 - name: toolset-certs
   mountPath: "/pulsar/certs/toolset"
   readOnly: true
-{{- end }}
 - name: ca
   mountPath: "/pulsar/certs/ca"
   readOnly: true
+{{- if .Values.tls.zookeeper.enabled }}
+- name: keytool
+  mountPath: "/pulsar/keytool/keytool.sh"
+  subPath: keytool.sh
 {{- end }}
-{{- if .Values.tls.toolset.cacerts.enabled }}
-- mountPath: "/pulsar/certs/cacerts"
-  name: toolset-cacerts
-{{- range $cert := .Values.tls.toolset.cacerts.certs }}
-- name: {{ $cert.name }}
-  mountPath: "/pulsar/certs/{{ $cert.name }}"
-  readOnly: true
-{{- end }}
-- name: certs-scripts
-  mountPath: "/pulsar/bin/certs-combine-pem.sh"
-  subPath: certs-combine-pem.sh
-- name: certs-scripts
-  mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
-  subPath: certs-combine-pem-infinity.sh
 {{- end }}
 {{- end }}
@@ -75,8 +63,7 @@ Define toolset tls certs mounts
 Define toolset tls certs volumes
 */}}
 {{- define "pulsar.toolset.certs.volumes" -}}
-{{- if .Values.tls.enabled }}
-{{- if .Values.tls.zookeeper.enabled }}
+{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
 - name: toolset-certs
   secret:
     secretName: "{{ .Release.Name }}-{{ .Values.tls.toolset.cert_name }}"
@@ -85,32 +72,22 @@ Define toolset tls certs volumes
       path: tls.crt
     - key: tls.key
       path: tls.key
-    - key: tls-combined.pem
-      path: tls-combined.pem
-{{- end }}
 - name: ca
   secret:
-    secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}"
+{{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
+    secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
+{{- end }}
+{{- if eq .Values.certs.internal_issuer.type "ca" }}
+    secretName: "{{ .Values.certs.issuers.ca.secretName }}"
+{{- end }}
     items:
       - key: ca.crt
        path: ca.crt
-{{- end }}
-{{- if .Values.tls.toolset.cacerts.enabled }}
-- name: toolset-cacerts
-  emptyDir: {}
-{{- range $cert := .Values.tls.toolset.cacerts.certs }}
-- name: {{ $cert.name }}
-  secret:
-    secretName: "{{ $cert.existingSecret }}"
-    items:
-{{- range $key := $cert.secretKeys }}
-      - key: {{ $key }}
-        path: {{ $key }}
-{{- end }}
-{{- end }}
-- name: certs-scripts
+{{- if .Values.tls.zookeeper.enabled }}
+- name: keytool
   configMap:
-    name: "{{ template "pulsar.fullname" . }}-certs-scripts"
+    name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
    defaultMode: 0755
 {{- end }}
 {{- end }}
-{{- end }}


@@ -1,37 +0,0 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{/*
Renders a value that contains template perhaps with scope if the scope is present.
Usage:
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
*/}}
{{- define "common.tplvalues.render" -}}
{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
{{- if contains "{{" (toJson .value) }}
{{- if .scope }}
{{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
{{- else }}
{{- tpl $value .context }}
{{- end }}
{{- else }}
{{- $value }}
{{- end }}
{{- end -}}
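
Usage is spelled out in the header comment; concretely, a string value that itself contains template markup is expanded against the chart context. A sketch (the `extraAnnotation` key is invented for illustration):

```yaml
# values.yaml
extraAnnotation: "deployed-by-{{ .Release.Name }}"

# in a template, the helper expands the embedded action:
#   annotations:
#     example: {{ include "common.tplvalues.render" (dict "value" .Values.extraAnnotation "context" $) }}
# which renders as: example: deployed-by-pulsar   (for a release named "pulsar")
```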


@@ -1,25 +0,0 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{/*
Check deprecated setting auth.authentication.provider since 4.1.0
*/}}
{{- if (and .Values.auth.authentication.enabled (not (empty .Values.auth.authentication.provider))) }}
{{- fail "ERROR: Setting auth.authentication.provider is no longer supported. For details, see the migration guide in README.md." }}
{{- end }}
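
Concretely, a values file that still sets `auth.authentication.provider` trips this check; the replacement is the per-provider toggles visible in the broker ConfigMap hunks later in this compare. A sketch of the migrated form (not the full migration guide):

```yaml
auth:
  authentication:
    enabled: true
    # replaces the removed auth.authentication.provider setting
    jwt:
      enabled: true
    openid:
      enabled: false
```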


@@ -53,93 +53,7 @@ Define zookeeper tls settings
 */}}
 {{- define "pulsar.zookeeper.tls.settings" -}}
 {{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
-{{- include "pulsar.component.zookeeper.tls.settings" (dict "component" "zookeeper" "isClient" false "isCacerts" .Values.tls.zookeeper.cacerts.enabled) -}}
+/pulsar/keytool/keytool.sh zookeeper {{ template "pulsar.zookeeper.hostname" . }} false;
 {{- end }}
 {{- end }}
The remainder of the old hunk removes the master-only helpers below:
{{- define "pulsar.component.zookeeper.tls.settings" }}
{{- $component := .component -}}
{{- $isClient := .isClient -}}
{{- $keyFile := printf "/pulsar/certs/%s/tls-combined.pem" $component -}}
{{- $caFile := ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .isCacerts -}}
{{- if $isClient }}
echo $'\n' >> conf/pulsar_env.sh
echo "PULSAR_EXTRA_OPTS=\"\${PULSAR_EXTRA_OPTS} -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.client.certReload=true -Dzookeeper.ssl.keyStore.location={{- $keyFile }} -Dzookeeper.ssl.keyStore.type=PEM -Dzookeeper.ssl.trustStore.location={{- $caFile }} -Dzookeeper.ssl.trustStore.type=PEM\"" >> conf/pulsar_env.sh
echo $'\n' >> conf/bkenv.sh
echo "BOOKIE_EXTRA_OPTS=\"\${BOOKIE_EXTRA_OPTS} -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.client.certReload=true -Dzookeeper.ssl.keyStore.location={{- $keyFile }} -Dzookeeper.ssl.keyStore.type=PEM -Dzookeeper.ssl.trustStore.location={{- $caFile }} -Dzookeeper.ssl.trustStore.type=PEM\"" >> conf/bkenv.sh
{{- else }}
echo $'\n' >> conf/pulsar_env.sh
echo "PULSAR_EXTRA_OPTS=\"\${PULSAR_EXTRA_OPTS} -Dzookeeper.ssl.keyStore.location={{- $keyFile }} -Dzookeeper.ssl.keyStore.type=PEM -Dzookeeper.ssl.trustStore.location={{- $caFile }} -Dzookeeper.ssl.trustStore.type=PEM\"" >> conf/pulsar_env.sh
{{- end }}
{{- end }}
{{/*
Define zookeeper tls certs mounts
*/}}
{{- define "pulsar.zookeeper.certs.volumeMounts" -}}
{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
- mountPath: "/pulsar/certs/zookeeper"
name: zookeeper-certs
readOnly: true
- mountPath: "/pulsar/certs/ca"
name: ca
readOnly: true
{{- end }}
{{- if .Values.tls.zookeeper.cacerts.enabled }}
- mountPath: "/pulsar/certs/cacerts"
name: zookeeper-cacerts
{{- range $cert := .Values.tls.zookeeper.cacerts.certs }}
- name: {{ $cert.name }}
mountPath: "/pulsar/certs/{{ $cert.name }}"
readOnly: true
{{- end }}
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem.sh"
subPath: certs-combine-pem.sh
- name: certs-scripts
mountPath: "/pulsar/bin/certs-combine-pem-infinity.sh"
subPath: certs-combine-pem-infinity.sh
{{- end }}
{{- end }}
{{/*
Define zookeeper tls certs volumes
*/}}
{{- define "pulsar.zookeeper.certs.volumes" -}}
{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
- name: zookeeper-certs
secret:
secretName: "{{ .Release.Name }}-{{ .Values.tls.zookeeper.cert_name }}"
items:
- key: tls.crt
path: tls.crt
- key: tls.key
path: tls.key
- key: tls-combined.pem
path: tls-combined.pem
- name: ca
secret:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}"
items:
- key: ca.crt
path: ca.crt
{{- end }}
{{- if .Values.tls.zookeeper.cacerts.enabled }}
- name: zookeeper-cacerts
emptyDir: {}
{{- range $cert := .Values.tls.zookeeper.cacerts.certs }}
- name: {{ $cert.name }}
secret:
secretName: "{{ $cert.existingSecret }}"
items:
{{- range $key := $cert.secretKeys }}
- key: {{ $key }}
path: {{ $key }}
{{- end }}
{{- end }}
- name: certs-scripts
configMap:
name: "{{ template "pulsar.fullname" . }}-certs-scripts"
defaultMode: 0755
{{- end }}
{{- end }}


@@ -17,7 +17,42 @@
 # under the License.
 #
-# deploy autorecovery PodMonitor only when `$.Values.autorecovery.podMonitor.enabled` is true
+# deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
 {{- if $.Values.autorecovery.podMonitor.enabled }}
-{{- include "pulsar.podMonitor" (list . "autorecovery" (printf "component: %s" .Values.autorecovery.component)) }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: {{ template "pulsar.name" . }}-recovery
+  labels:
+    app: {{ template "pulsar.name" . }}
+    chart: {{ template "pulsar.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  jobLabel: recovery
+  podMetricsEndpoints:
+  - port: http
+    path: /metrics
+    scheme: http
+    interval: {{ $.Values.autorecovery.podMonitor.interval }}
+    scrapeTimeout: {{ $.Values.autorecovery.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
+    {{- if $.Values.autorecovery.podMonitor.metricRelabelings }}
+    metricRelabelings: {{ toYaml $.Values.autorecovery.podMonitor.metricRelabelings | nindent 8 }}
+    {{- end }}
+  selector:
+    matchLabels:
+      {{- include "pulsar.matchLabels" . | nindent 6 }}
+      component: {{ .Values.autorecovery.component }}
 {{- end }}


@@ -0,0 +1,85 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
namespace: {{ template "pulsar.namespace" . }}
rules:
- apiGroups:
- policy
resourceNames:
- "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
namespace: {{ template "pulsar.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
subjects:
- kind: ServiceAccount
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
namespace: {{ template "pulsar.namespace" . }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
{{- if .Values.rbac.limit_to_namespace }}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}-{{ template "pulsar.namespace" . }}"
{{- else}}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
{{- end}}
spec:
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
runAsUser:
rule: 'RunAsAny'
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
{{- end }}
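
All three PSP manifests in this compare (autorecovery, bookkeeper, broker) key off the same switches; a values sketch that would render them on a pre-1.25 cluster:

```yaml
rbac:
  enabled: true
  # PodSecurityPolicy objects render only on Kubernetes < 1.25
  psp: true
  # scope the PSP resource names to the release namespace
  limit_to_namespace: true
```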


@@ -26,10 +26,6 @@ metadata:
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.autorecovery.component }}
-  {{- with .Values.autorecovery.service.annotations }}
-  annotations:
-    {{ toYaml . | indent 4 }}
-  {{- end }}
 spec:
   ports:
   - name: http
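
For reference, the annotation block removed here is fed from a master-side values entry of this shape (the annotation key and value are placeholders):

```yaml
autorecovery:
  service:
    annotations:
      example.com/team: messaging
```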


@@ -23,7 +23,6 @@ kind: StatefulSet
 metadata:
   name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
   namespace: {{ template "pulsar.namespace" . }}
-  annotations: {{ .Values.autorecovery.appAnnotations | toYaml | nindent 4 }}
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.autorecovery.component }}
@@ -44,10 +43,8 @@ spec:
         {{- include "pulsar.template.labels" . | nindent 8 }}
         component: {{ .Values.autorecovery.component }}
       annotations:
-        {{- if not .Values.autorecovery.podMonitor.enabled }}
         prometheus.io/scrape: "true"
         prometheus.io/port: "{{ .Values.autorecovery.ports.http }}"
-        {{- end }}
         {{- if .Values.autorecovery.restartPodsOnConfigMapChange }}
         checksum/config: {{ include (print $.Template.BasePath "/autorecovery-configmap.yaml") . | sha256sum }}
         {{- end }}
@@ -113,18 +110,6 @@ spec:
       terminationGracePeriodSeconds: {{ .Values.autorecovery.gracePeriod }}
       serviceAccountName: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
       initContainers:
-      {{- if .Values.tls.autorecovery.cacerts.enabled }}
-      - name: cacerts
-        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.autorecovery "root" .) }}"
-        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.autorecovery "root" .) }}"
-        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-        command: ["sh", "-c"]
-        args:
-        - |
-          bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.autorecovery.cacerts.certs) }}
-        volumeMounts:
-        {{- include "pulsar.autorecovery.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
       {{- if and .Values.autorecovery.waitBookkeeperTimeout (gt (.Values.autorecovery.waitBookkeeperTimeout | int) 0) }}
       # This initContainer will wait for bookkeeper initnewcluster to complete
       # before deploying the bookies
@@ -134,15 +119,12 @@ spec:
         resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
         command: ["timeout", "{{ .Values.autorecovery.waitBookkeeperTimeout }}", "sh", "-c"]
         args:
-        - |
+        - >
          {{- include "pulsar.autorecovery.init.verify_cluster_id" . | nindent 10 }}
         envFrom:
         - configMapRef:
             name: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
         volumeMounts:
-        {{- if .Values.autorecovery.extraVolumeMounts }}
-{{ toYaml .Values.autorecovery.extraVolumeMounts | indent 8 }}
-        {{- end }}
         {{- include "pulsar.autorecovery.certs.volumeMounts" . | nindent 8 }}
       {{- end }}
       {{- if .Values.autorecovery.initContainers }}
@@ -156,14 +138,13 @@ spec:
         resources:
{{ toYaml .Values.autorecovery.resources | indent 10 }}
         {{- end }}
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end}}
         command: ["sh", "-c"]
         args:
-        - |
-          {{- if .Values.tls.autorecovery.cacerts.enabled }}
-          cd /pulsar/certs/cacerts;
-          nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.autorecovery.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
-          cd /pulsar;
-          {{- end }}
+        - >
           bin/apply-config-from-env.py conf/bookkeeper.conf;
           {{- include "pulsar.autorecovery.zookeeper.tls.settings" . | nindent 10 }}
           OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/bookkeeper autorecovery
@@ -177,9 +158,6 @@ spec:
         {{- include "pulsar.autorecovery.certs.volumeMounts" . | nindent 8 }}
       volumes:
       {{- include "pulsar.autorecovery.certs.volumes" . | nindent 6 }}
-      {{- if .Values.autorecovery.extraVolumes }}
-{{ toYaml .Values.autorecovery.extraVolumes | indent 6 }}
-      {{- end }}
       {{- include "pulsar.imagePullSecrets" . | nindent 6}}
 {{- end }}


@@ -33,14 +33,10 @@ spec:
   ttlSecondsAfterFinished: {{ .Values.job.ttl.secondsAfterFinished | default 600 }}
   {{- end }}
   template:
-    metadata:
-      labels:
-        {{- include "pulsar.template.labels" . | nindent 8 }}
-        component: {{ .Values.bookkeeper.component }}-init
     spec:
      {{- include "pulsar.imagePullSecrets" . | nindent 6 }}
      serviceAccountName: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
-      {{- if .Values.pulsar_metadata.nodeSelector }}
+      {{- with .Values.pulsar_metadata.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.pulsar_metadata.nodeSelector | indent 8 }}
      {{- end }}
@@ -49,18 +45,6 @@ spec:
{{ toYaml .Values.pulsar_metadata.tolerations | indent 8 }}
      {{- end }}
      initContainers:
-      {{- if .Values.tls.bookie.cacerts.enabled }}
-      - name: cacerts
-        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.bookie "root" .) }}"
-        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.bookie "root" .) }}"
-        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-        command: ["sh", "-c"]
-        args:
-        - |
-          bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.bookie.cacerts.certs) }}
-        volumeMounts:
-        {{- include "pulsar.toolset.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
      {{- if and .Values.components.zookeeper .Values.bookkeeper.metadata.waitZookeeperTimeout (gt (.Values.bookkeeper.metadata.waitZookeeperTimeout | int) 0) }}
      - name: wait-zookeeper-ready
        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.bookie "root" .) }}"
@@ -68,7 +52,7 @@ spec:
        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
        command: ["timeout", "{{ .Values.bookkeeper.metadata.waitZookeeperTimeout }}", "sh", "-c"]
        args:
-        - |
+        - >-
          {{- if $zk:=.Values.pulsar_metadata.userProvidedZookeepers }}
          export PULSAR_MEM="-Xmx128M";
          until timeout 15 bin/pulsar zookeeper-shell -server {{ $zk }} ls {{ or .Values.metadataPrefix "/" }}; do
@@ -87,7 +71,7 @@ spec:
        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
        command: ["timeout", "{{ .Values.bookkeeper.metadata.waitOxiaTimeout }}", "sh", "-c"]
        args:
-        - |
+        - >-
          until nslookup {{ template "pulsar.oxia.server.service" . }}; do
            sleep 3;
          done;
@@ -102,7 +86,7 @@ spec:
        {{- end }}
        command: ["timeout", "{{ .Values.bookkeeper.metadata.initTimeout | default 60 }}", "sh", "-c"]
        args:
-        - |
+        - >
          bin/apply-config-from-env.py conf/bookkeeper.conf;
          {{- include "pulsar.toolset.zookeeper.tls.settings" . | nindent 12 }}
          export BOOKIE_MEM="-Xmx128M";
@@ -117,6 +101,10 @@ spec:
          {{- if .Values.extraInitCommand }}
          {{ .Values.extraInitCommand }}
          {{- end }}
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end }}
        envFrom:
        - configMapRef:
            name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"


@@ -19,5 +19,40 @@
 # deploy bookkeeper PodMonitor only when `$.Values.bookkeeper.podMonitor.enabled` is true
 {{- if $.Values.bookkeeper.podMonitor.enabled }}
-{{- include "pulsar.podMonitor" (list . "bookkeeper" (printf "component: %s" .Values.bookkeeper.component)) }}
-{{- end }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: {{ template "pulsar.fullname" . }}-bookie
+  labels:
+    app: {{ template "pulsar.name" . }}
+    chart: {{ template "pulsar.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  jobLabel: bookie
+  podMetricsEndpoints:
+  - port: http
+    path: /metrics
+    scheme: http
+    interval: {{ $.Values.bookkeeper.podMonitor.interval }}
+    scrapeTimeout: {{ $.Values.bookkeeper.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
+    {{- if $.Values.bookkeeper.podMonitor.metricRelabelings }}
+    metricRelabelings: {{ toYaml $.Values.bookkeeper.podMonitor.metricRelabelings | nindent 8 }}
+    {{- end }}
+  selector:
+    matchLabels:
+      {{- include "pulsar.matchLabels" . | nindent 6 }}
+      component: bookie
+{{- end }}


@@ -0,0 +1,85 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
namespace: {{ template "pulsar.namespace" . }}
rules:
- apiGroups:
- policy
resourceNames:
- "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
namespace: {{ template "pulsar.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
subjects:
- kind: ServiceAccount
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
namespace: {{ template "pulsar.namespace" . }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
{{- if .Values.rbac.limit_to_namespace }}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}-{{ template "pulsar.namespace" . }}"
{{- else}}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
{{- end}}
spec:
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
runAsUser:
rule: 'RunAsAny'
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
{{- end}}


@@ -26,9 +26,9 @@ metadata:
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.bookkeeper.component }}
-  {{- with .Values.bookkeeper.service.annotations }}
+  {{- if .Values.bookkeeper.service.annotations }}
   annotations:
-    {{ toYaml . | indent 4 }}
+{{ toYaml .Values.bookkeeper.service.annotations | indent 4 }}
   {{- end }}
 spec:
   ports:


@@ -23,7 +23,6 @@ kind: StatefulSet
 metadata:
   name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
   namespace: {{ template "pulsar.namespace" . }}
-  annotations: {{ .Values.bookkeeper.appAnnotations | toYaml | nindent 4 }}
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.bookkeeper.component }}
@@ -43,10 +42,8 @@ spec:
         {{- include "pulsar.template.labels" . | nindent 8 }}
         component: {{ .Values.bookkeeper.component }}
       annotations:
-        {{- if not .Values.bookkeeper.podMonitor.enabled }}
         prometheus.io/scrape: "true"
         prometheus.io/port: "{{ .Values.bookkeeper.ports.http }}"
-        {{- end }}
         {{- if .Values.bookkeeper.restartPodsOnConfigMapChange }}
         checksum/config: {{ include (print $.Template.BasePath "/bookkeeper-configmap.yaml") . | sha256sum }}
         {{- end }}
@@ -115,18 +112,6 @@ spec:
      {{- end }}
      {{- if and .Values.bookkeeper.waitMetadataTimeout (gt (.Values.bookkeeper.waitMetadataTimeout | int) 0) }}
      initContainers:
-      {{- if .Values.tls.bookie.cacerts.enabled }}
-      - name: cacerts
-        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.bookie "root" .) }}"
-        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.bookie "root" .) }}"
-        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-        command: ["sh", "-c"]
-        args:
-        - |
-          bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.bookie.cacerts.certs) }}
-        volumeMounts:
-        {{- include "pulsar.bookkeeper.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
      # This initContainer will wait for bookkeeper initnewcluster to complete
      # before deploying the bookies
      - name: pulsar-bookkeeper-verify-clusterid
@@ -136,11 +121,15 @@ spec:
        command: ["timeout", "{{ .Values.bookkeeper.waitMetadataTimeout }}", "sh", "-c"]
        args:
        # only reformat bookie if bookkeeper is running without persistence
-        - |
+        - >
          {{- include "pulsar.bookkeeper.init.verify_cluster_id" . | nindent 10 }}
        envFrom:
        - configMapRef:
            name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end}}
        volumeMounts:
        {{- include "pulsar.bookkeeper.certs.volumeMounts" . | nindent 8 }}
      {{- end }}
@@ -187,34 +176,17 @@ spec:
        {{- end }}
        command: ["sh", "-c"]
        args:
-        - |
-          # set required environment variables to use rocksdb config files provided in the Pulsar image
-          export PULSAR_PREFIX_defaultRocksdbConf=${PULSAR_PREFIX_defaultRocksdbConf:-conf/default_rocksdb.conf}
-          export PULSAR_PREFIX_entryLocationRocksdbConf=${PULSAR_PREFIX_entryLocationRocksdbConf:-conf/entry_location_rocksdb.conf}
-          export PULSAR_PREFIX_ledgerMetadataRocksdbConf=${PULSAR_PREFIX_ledgerMetadataRocksdbConf:-conf/ledger_metadata_rocksdb.conf}
-          if [ -x bin/update-rocksdb-conf-from-env.py ] && [ -f "${PULSAR_PREFIX_entryLocationRocksdbConf}" ]; then
-            echo "Updating ${PULSAR_PREFIX_entryLocationRocksdbConf} from environment variables starting with dbStorage_rocksDB_*"
-            bin/update-rocksdb-conf-from-env.py "${PULSAR_PREFIX_entryLocationRocksdbConf}"
-          else
-            # Ensure that Bookkeeper will not load RocksDB config from existing files and fallback to use default RocksDB config
-            # See https://github.com/apache/bookkeeper/pull/3523 as reference
-            export PULSAR_PREFIX_defaultRocksdbConf=conf/non_existing_default_rocksdb.conf
-            export PULSAR_PREFIX_entryLocationRocksdbConf=conf/non_existing_entry_location_rocksdb.conf
-            export PULSAR_PREFIX_ledgerMetadataRocksdbConf=conf/non_existing_ledger_metadata_rocksdb.conf
-            # Ensure that Bookkeeper will use RocksDB format_version 5 (this currently applies only to the entry location rocksdb due to a bug in Bookkeeper)
-            export PULSAR_PREFIX_dbStorage_rocksDB_format_version=${PULSAR_PREFIX_dbStorage_rocksDB_format_version:-5}
-          fi
-          {{- if .Values.bookkeeper.additionalCommand }}
+        - >
+          {{- if .Values.bookkeeper.additionalCommand }}
          {{ .Values.bookkeeper.additionalCommand }}
          {{- end }}
-          {{- if .Values.tls.bookie.cacerts.enabled }}
-          cd /pulsar/certs/cacerts;
-          nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.bookie.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
-          cd /pulsar;
-          {{- end }}
          bin/apply-config-from-env.py conf/bookkeeper.conf;
          {{- include "pulsar.bookkeeper.zookeeper.tls.settings" . | nindent 10 }}
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar bookie;
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end}}
        ports:
        - name: "{{ .Values.tcpPrefix }}bookie"
          containerPort: {{ .Values.bookkeeper.ports.bookie }}
@@ -263,10 +235,10 @@ spec:
          emptyDir: {}
      {{- end }}
      {{- include "pulsar.bookkeeper.certs.volumes" . | nindent 6 }}
+      {{- include "pulsar.imagePullSecrets" . | nindent 6}}
      {{- if .Values.bookkeeper.extraVolumes }}
{{ toYaml .Values.bookkeeper.extraVolumes | indent 6 }}
      {{- end }}
-      {{- include "pulsar.imagePullSecrets" . | nindent 6}}
      {{- if and (and .Values.persistence .Values.volumes.persistence) .Values.bookkeeper.volumes.persistence}}
      volumeClaimTemplates:
      {{- if .Values.bookkeeper.volumes.useSingleCommonVolume }}
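
The master-side RocksDB bootstrap above reads `dbStorage_rocksDB_*` settings from the pod environment. A sketch of how those could be supplied, assuming `bookkeeper.configData` entries are exported into the bookie environment via the chart's ConfigMap (the cache-size value is arbitrary):

```yaml
bookkeeper:
  configData:
    # consumed by update-rocksdb-conf-from-env.py at startup (assumption)
    dbStorage_rocksDB_blockCacheSize: "268435456"
    # entry-location RocksDB format_version; defaults to 5 when no conf file is used
    PULSAR_PREFIX_dbStorage_rocksDB_format_version: "5"
```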


@@ -29,18 +29,12 @@ metadata:
 data:
   # Metadata settings
   {{- if .Values.components.zookeeper }}
-  metadataStoreUrl: "zk:{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}"
-  {{- $configMetadataStoreUrl := "" }}
+  zookeeperServers: "{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}"
   {{- if .Values.pulsar_metadata.configurationStore }}
-  {{- $configMetadataStoreUrl = printf "zk:%s%s" (include "pulsar.configurationStore.connect" .) .Values.pulsar_metadata.configurationStoreMetadataPrefix }}
-  {{- else }}
-  {{- $configMetadataStoreUrl = printf "zk:%s%s" (include "pulsar.zookeeper.connect" .) .Values.metadataPrefix }}
+  configurationStoreServers: "{{ template "pulsar.configurationStore.connect" . }}{{ .Values.pulsar_metadata.configurationStoreMetadataPrefix }}"
   {{- end }}
-  configurationMetadataStoreUrl: "{{ $configMetadataStoreUrl }}"
-  {{- if .Values.pulsar_metadata.bookkeeper.usePulsarMetadataClientDriver }}
-  bookkeeperMetadataServiceUri: "metadata-store:{{ $configMetadataStoreUrl }}/ledgers"
-  {{- else }}
-  bookkeeperMetadataServiceUri: "zk+hierarchical://{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}/ledgers"
+  {{- if not .Values.pulsar_metadata.configurationStore }}
+  configurationStoreServers: "{{ template "pulsar.zookeeper.connect" . }}{{ .Values.metadataPrefix }}"
   {{- end }}
   {{- end }}
   {{- if .Values.components.oxia }}
@@ -49,49 +43,11 @@ data:
   bookkeeperMetadataServiceUri: "{{ template "pulsar.oxia.metadata.url.bookkeeper" . }}"
   {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreAllowReadOnlyOperations" }}
-  PULSAR_PREFIX_metadataStoreAllowReadOnlyOperations: "{{ .Values.pulsar_metadata.metadataStoreAllowReadOnlyOperations }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreSessionTimeoutMillis" }}
-  metadataStoreSessionTimeoutMillis: "{{ .Values.pulsar_metadata.metadataStoreSessionTimeoutMillis }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreOperationTimeoutSeconds" }}
-  metadataStoreOperationTimeoutSeconds: "{{ .Values.pulsar_metadata.metadataStoreOperationTimeoutSeconds }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreCacheExpirySeconds" }}
-  metadataStoreCacheExpirySeconds: "{{ .Values.pulsar_metadata.metadataStoreCacheExpirySeconds }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreBatchingEnabled" }}
-  metadataStoreBatchingEnabled: "{{ .Values.pulsar_metadata.metadataStoreBatchingEnabled }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreBatchingMaxDelayMillis" }}
-  metadataStoreBatchingMaxDelayMillis: "{{ .Values.pulsar_metadata.metadataStoreBatchingMaxDelayMillis }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreBatchingMaxOperations" }}
-  metadataStoreBatchingMaxOperations: "{{ .Values.pulsar_metadata.metadataStoreBatchingMaxOperations }}"
-  {{- end }}
-  {{- if hasKey .Values.pulsar_metadata "metadataStoreBatchingMaxSizeKb" }}
-  metadataStoreBatchingMaxSizeKb: "{{ .Values.pulsar_metadata.metadataStoreBatchingMaxSizeKb }}"
-  {{- end }}
   # Broker settings
   clusterName: {{ template "pulsar.cluster.name" . }}
-  # Enable all metrics by default
   exposeTopicLevelMetricsInPrometheus: "true"
-  exposeConsumerLevelMetricsInPrometheus: "true"
-  exposeProducerLevelMetricsInPrometheus: "true"
-  exposeManagedLedgerMetricsInPrometheus: "true"
-  exposeManagedCursorMetricsInPrometheus: "true"
-  exposeBundlesMetricsInPrometheus: "true"
-  exposePublisherStats: "true"
-  exposePreciseBacklogInPrometheus: "true"
-  replicationMetricsEnabled: "true"
-  splitTopicAndPartitionLabelInPrometheus: "true"
-  aggregatePublisherStatsByProducerName: "true"
-  bookkeeperClientExposeStatsToPrometheus: "true"
   numHttpServerThreads: "8"
+  zooKeeperSessionTimeoutMillis: "30000"
   statusFilePath: "{{ template "pulsar.home" . }}/logs/status"
   # Tiered storage settings
@@ -204,7 +160,7 @@ data:
   # TLS Settings
   tlsCertificateFilePath: "/pulsar/certs/broker/tls.crt"
   tlsKeyFilePath: "/pulsar/certs/broker/tls.key"
-  tlsTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.broker.cacerts.enabled | quote }}
+  tlsTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
   {{- end }}
   # Authentication Settings
@@ -217,14 +173,9 @@ data:
   proxyRoles: {{ .Values.auth.superUsers.proxy }}
   {{- end }}
   {{- end }}
-  {{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled }}
+  {{- if eq .Values.auth.authentication.provider "jwt" }}
   # token authentication configuration
-  {{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled .Values.auth.authentication.openid.enabled }}
-  authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken,org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID"
-  {{- end }}
-  {{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled ( not .Values.auth.authentication.openid.enabled ) }}
   authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken"
-  {{- end }}
   brokerClientAuthenticationParameters: "file:///pulsar/tokens/broker/token"
   brokerClientAuthenticationPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
   {{- if .Values.auth.authentication.jwt.usingSecretKey }}
@@ -233,25 +184,6 @@ data:
   tokenPublicKey: "file:///pulsar/keys/token/public.key"
   {{- end }}
   {{- end }}
-  {{- if and .Values.auth.authentication.enabled .Values.auth.authentication.openid.enabled }}
-  # openid authentication configuration
-  {{- if and .Values.auth.authentication.enabled .Values.auth.authentication.openid.enabled ( not .Values.auth.authentication.jwt.enabled ) }}
-  authenticationProviders: "org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID"
-  {{- end }}
-  PULSAR_PREFIX_openIDAllowedTokenIssuers: {{ .Values.auth.authentication.openid.openIDAllowedTokenIssuers | uniq | compact | sortAlpha | join "," | quote }}
-  PULSAR_PREFIX_openIDAllowedAudiences: {{ .Values.auth.authentication.openid.openIDAllowedAudiences | uniq | compact | sortAlpha | join "," | quote }}
-  PULSAR_PREFIX_openIDTokenIssuerTrustCertsFilePath: {{ .Values.auth.authentication.openid.openIDTokenIssuerTrustCertsFilePath | quote }}
-  PULSAR_PREFIX_openIDRoleClaim: {{ .Values.auth.authentication.openid.openIDRoleClaim | quote }}
-  PULSAR_PREFIX_openIDAcceptedTimeLeewaySeconds: {{ .Values.auth.authentication.openid.openIDAcceptedTimeLeewaySeconds | quote }}
-  PULSAR_PREFIX_openIDCacheSize: {{ .Values.auth.authentication.openid.openIDCacheSize | quote }}
-  PULSAR_PREFIX_openIDCacheRefreshAfterWriteSeconds: {{ .Values.auth.authentication.openid.openIDCacheRefreshAfterWriteSeconds | quote }}
-  PULSAR_PREFIX_openIDCacheExpirationSeconds: {{ .Values.auth.authentication.openid.openIDCacheExpirationSeconds | quote }}
-  PULSAR_PREFIX_openIDHttpConnectionTimeoutMillis: {{ .Values.auth.authentication.openid.openIDHttpConnectionTimeoutMillis | quote }}
-  PULSAR_PREFIX_openIDHttpReadTimeoutMillis: {{ .Values.auth.authentication.openid.openIDHttpReadTimeoutMillis | quote }}
-  PULSAR_PREFIX_openIDKeyIdCacheMissRefreshSeconds: {{ .Values.auth.authentication.openid.openIDKeyIdCacheMissRefreshSeconds | quote }}
-  PULSAR_PREFIX_openIDRequireIssuersUseHttps: {{ .Values.auth.authentication.openid.openIDRequireIssuersUseHttps | quote }}
-  PULSAR_PREFIX_openIDFallbackDiscoveryMode: {{ .Values.auth.authentication.openid.openIDFallbackDiscoveryMode | quote }}
-  {{- end }}
   {{- if and .Values.tls.enabled .Values.tls.bookie.enabled }}
@@ -260,13 +192,13 @@ data:
   bookkeeperTLSKeyFileType: "PEM"
   bookkeeperTLSKeyFilePath: "/pulsar/certs/broker/tls.key"
   bookkeeperTLSCertificateFilePath: "/pulsar/certs/broker/tls.crt"
-  bookkeeperTLSTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.broker.cacerts.enabled | quote }}
+  bookkeeperTLSTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
   bookkeeperTLSTrustCertTypes: "PEM"
   PULSAR_PREFIX_bookkeeperTLSClientAuthentication: "true"
   PULSAR_PREFIX_bookkeeperTLSKeyFileType: "PEM"
   PULSAR_PREFIX_bookkeeperTLSKeyFilePath: "/pulsar/certs/broker/tls.key"
   PULSAR_PREFIX_bookkeeperTLSCertificateFilePath: "/pulsar/certs/broker/tls.crt"
-  PULSAR_PREFIX_bookkeeperTLSTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.broker.cacerts.enabled | quote }}
+  PULSAR_PREFIX_bookkeeperTLSTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
   PULSAR_PREFIX_bookkeeperTLSTrustCertTypes: "PEM"
   # https://github.com/apache/bookkeeper/pull/2300
   bookkeeperUseV2WireProtocol: "false"
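
To make the master-side rewrite concrete, the first hunk would render roughly as follows for a release named "pulsar" with `metadataPrefix: "/pulsar"`, no separate configuration store, and the default ZooKeeper service name and client port (a sketch, not captured output):

```yaml
metadataStoreUrl: "zk:pulsar-zookeeper:2181/pulsar"
configurationMetadataStoreUrl: "zk:pulsar-zookeeper:2181/pulsar"
# with pulsar_metadata.bookkeeper.usePulsarMetadataClientDriver: true
bookkeeperMetadataServiceUri: "metadata-store:zk:pulsar-zookeeper:2181/pulsar/ledgers"
# otherwise:
# bookkeeperMetadataServiceUri: "zk+hierarchical://pulsar-zookeeper:2181/pulsar/ledgers"
```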


@@ -19,5 +19,40 @@
 # deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
 {{- if $.Values.broker.podMonitor.enabled }}
-{{- include "pulsar.podMonitor" (list . "broker" (printf "component: %s" .Values.broker.component)) }}
-{{- end }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: {{ template "pulsar.fullname" . }}-broker
+  labels:
+    app: {{ template "pulsar.name" . }}
+    chart: {{ template "pulsar.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  jobLabel: broker
+  podMetricsEndpoints:
+  - port: http
+    path: /metrics
+    scheme: http
+    interval: {{ $.Values.broker.podMonitor.interval }}
+    scrapeTimeout: {{ $.Values.broker.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
+    {{- if $.Values.broker.podMonitor.metricRelabelings }}
+    metricRelabelings: {{ toYaml $.Values.broker.podMonitor.metricRelabelings | nindent 8 }}
+    {{- end }}
+  selector:
+    matchLabels:
+      {{- include "pulsar.matchLabels" . | nindent 6 }}
+      component: broker
+{{- end }}


@@ -0,0 +1,85 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}-psp"
namespace: {{ template "pulsar.namespace" . }}
rules:
- apiGroups:
- policy
resourceNames:
- "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}-psp"
namespace: {{ template "pulsar.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}-psp"
subjects:
- kind: ServiceAccount
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}-acct"
namespace: {{ template "pulsar.namespace" . }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
{{- if .Values.rbac.limit_to_namespace }}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}-{{ template "pulsar.namespace" . }}"
{{- else}}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}"
{{- end}}
spec:
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
runAsUser:
rule: 'RunAsAny'
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
{{- end}}


@@ -26,7 +26,7 @@ metadata:
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.broker.component }}
-  {{- with .Values.broker.service.annotations }}
+  {{- with .Values.broker.service_account.annotations }}
   annotations:
     {{ toYaml . | indent 4 }}
   {{- end }}


@@ -25,7 +25,6 @@ metadata:
   name: {{ $stsName | quote }}
   {{- $namespace := include "pulsar.namespace" . }}
   namespace: {{ $namespace | quote }}
-  annotations: {{ .Values.broker.appAnnotations | toYaml | nindent 4 }}
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.broker.component }}
@@ -63,10 +62,8 @@ spec:
         {{- include "pulsar.template.labels" . | nindent 8 }}
         component: {{ .Values.broker.component }}
       annotations:
-        {{- if not .Values.broker.podMonitor.enabled }}
         prometheus.io/scrape: "true"
         prometheus.io/port: "{{ .Values.broker.ports.http }}"
-        {{- end }}
         {{- if .Values.broker.restartPodsOnConfigMapChange }}
         checksum/config: {{ include (print $.Template.BasePath "/broker-configmap.yaml") . | sha256sum }}
         {{- end }}
@@ -130,18 +127,6 @@ spec:
      {{- end }}
      terminationGracePeriodSeconds: {{ .Values.broker.gracePeriod }}
      initContainers:
-      {{- if .Values.tls.broker.cacerts.enabled }}
-      - name: cacerts
-        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.broker "root" .) }}"
-        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.broker "root" .) }}"
-        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-        command: ["sh", "-c"]
-        args:
-        - |
-          bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.broker.cacerts.certs) }}
-        volumeMounts:
-        {{- include "pulsar.broker.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
      {{- if and .Values.components.zookeeper .Values.broker.waitZookeeperTimeout (gt (.Values.broker.waitZookeeperTimeout | int) 0) }}
      # This init container will wait for zookeeper to be ready before
      # deploying the bookies
@@ -151,17 +136,21 @@ spec:
        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
        command: ["timeout", "{{ .Values.broker.waitZookeeperTimeout }}", "sh", "-c"]
        args:
-        - |
+        - >-
          {{- include "pulsar.broker.zookeeper.tls.settings" . | nindent 12 }}
-          export PULSAR_MEM="-Xmx128M";
+          export BOOKIE_MEM="-Xmx128M";
          {{- if .Values.pulsar_metadata.configurationStore }}
-          until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.configurationStore.connect" . }} get {{ .Values.pulsar_metadata.configurationStoreMetadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do
+          until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.configurationStore.connect" . }} get {{ .Values.configurationStoreMetadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do
          {{- end }}
          {{- if not .Values.pulsar_metadata.configurationStore }}
          until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.zookeeper.connect" . }} get {{ .Values.metadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do
          {{- end }}
          echo "pulsar cluster {{ template "pulsar.cluster.name" . }} isn't initialized yet ... check in 3 seconds ..." && sleep 3;
          done;
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end }}
        volumeMounts:
        {{- include "pulsar.broker.certs.volumeMounts" . | nindent 8 }}
      {{- end }}
@@ -172,7 +161,7 @@ spec:
        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
        command: ["timeout", "{{ .Values.broker.waitOxiaTimeout }}", "sh", "-c"]
        args:
-        - |
+        - >-
          until nslookup {{ template "pulsar.oxia.server.service" . }}; do
            sleep 3;
          done;
@@ -186,7 +175,7 @@ spec:
        resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
        command: ["timeout", "{{ .Values.broker.waitBookkeeperTimeout }}", "sh", "-c"]
        args:
-        - |
+        - >
          {{- include "pulsar.broker.zookeeper.tls.settings" . | nindent 12 }}
          bin/apply-config-from-env.py conf/bookkeeper.conf;
          export BOOKIE_MEM="-Xmx128M";
@@ -205,6 +194,10 @@ spec:
        envFrom:
        - configMapRef:
            name: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end }}
        volumeMounts:
        {{- include "pulsar.broker.certs.volumeMounts" . | nindent 10 }}
      {{- end }}
@@ -251,15 +244,10 @@ spec:
        {{- end }}
        command: ["sh", "-c"]
        args:
-        - |
+        - >
          {{- if .Values.broker.additionalCommand }}
          {{ .Values.broker.additionalCommand }}
          {{- end }}
-          {{- if .Values.tls.broker.cacerts.enabled }}
-          cd /pulsar/certs/cacerts;
-          nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.broker.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
-          cd /pulsar;
-          {{- end }}
          bin/apply-config-from-env.py conf/broker.conf;
          bin/gen-yml-from-env.py conf/functions_worker.yml;
          echo "OK" > "${statusFilePath:-status}";
@ -293,7 +281,7 @@ spec:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}"
volumeMounts: volumeMounts:
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- mountPath: "/pulsar/keys" - mountPath: "/pulsar/keys"
name: token-keys name: token-keys
readOnly: true readOnly: true
@ -313,6 +301,10 @@ spec:
{{ toYaml .Values.broker.extraVolumeMounts | indent 10 }} {{ toYaml .Values.broker.extraVolumeMounts | indent 10 }}
{{- end }} {{- end }}
{{- include "pulsar.broker.certs.volumeMounts" . | nindent 10 }} {{- include "pulsar.broker.certs.volumeMounts" . | nindent 10 }}
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
securityContext:
readOnlyRootFilesystem: false
{{- end }}
env: env:
{{- if and (and .Values.broker.storageOffload (eq .Values.broker.storageOffload.driver "aws-s3")) .Values.broker.storageOffload.secret }} {{- if and (and .Values.broker.storageOffload (eq .Values.broker.storageOffload.driver "aws-s3")) .Values.broker.storageOffload.secret }}
- name: AWS_ACCESS_KEY_ID - name: AWS_ACCESS_KEY_ID
@ -346,7 +338,7 @@ spec:
{{ toYaml .Values.broker.extraVolumes | indent 6 }} {{ toYaml .Values.broker.extraVolumes | indent 6 }}
{{- end }} {{- end }}
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- name: token-keys - name: token-keys
secret: secret:
{{- if not .Values.auth.authentication.jwt.usingSecretKey }} {{- if not .Values.auth.authentication.jwt.usingSecretKey }}
View File
@ -1,82 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ template "pulsar.fullname" . }}-certs-scripts"
namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
component: certs-scripts
data:
certs-combine-pem.sh: |
#!/bin/bash
# This script combines all certificates into a single file.
# Usage: certs-combine-pem.sh <output_file> <cert1> <cert2> ...
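# Example (illustrative paths; the chart invokes it with the combined-PEM target first):
#   certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem /pulsar/certs/ca/ca.crt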
set -eu -o pipefail
if [ "$#" -lt 2 ]; then
echo "Usage: $0 <output_file> <cert1> <cert2> ..."
exit 1
fi
OUTPUT_FILE="$1"
shift
OUTPUT_FILE_TMP="${OUTPUT_FILE}.tmp"
rm -f "$OUTPUT_FILE_TMP"
for CERT in "$@"; do
if [ -f "$CERT" ]; then
echo "# $CERT" >> "$OUTPUT_FILE_TMP"
cat "$CERT" >> "$OUTPUT_FILE_TMP"
else
echo "Certificate file '$CERT' does not exist, skipping"
fi
done
if [ ! -f "$OUTPUT_FILE" ]; then
touch "$OUTPUT_FILE"
fi
if diff -q "$OUTPUT_FILE" "$OUTPUT_FILE_TMP" > /dev/null; then
# No changes detected, skipping update
rm -f "$OUTPUT_FILE_TMP"
else
# Update $OUTPUT_FILE with new certificates
mv "$OUTPUT_FILE_TMP" "$OUTPUT_FILE"
fi
certs-combine-pem-infinity.sh: |
#!/bin/bash
# This script combines all certificates into a single file, once every minute.
# Usage: certs-combine-pem-infinity.sh <output_file> <cert1> <cert2> ...
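# Example (illustrative; the chart runs it in the background with nohup):
#   nohup certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem /pulsar/certs/ca/ca.crt > combine.log 2>&1 &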
set -eu -o pipefail
if [ "$#" -lt 2 ]; then
echo "Usage: $0 <output_file> <cert1> <cert2> ..."
exit 1
fi
while true; do
/pulsar/bin/certs-combine-pem.sh "$@"
sleep 60
done
View File
@ -0,0 +1,110 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Script to convert a TLS key/cert pair into a Java keystore and truststore
{{- if .Values.tls.zookeeper.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
component: keytool
data:
keytool.sh: |
#!/bin/bash
component=$1
name=$2
isClient=$3
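# Usage: keytool.sh <component> <name> <isClient>
# e.g. (illustrative): keytool.sh zookeeper "${HOSTNAME}" true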
crtFile=/pulsar/certs/${component}/tls.crt
keyFile=/pulsar/certs/${component}/tls.key
caFile=/pulsar/certs/ca/ca.crt
tlsDir=/tmp/pulsar-tls$$
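# $$ expands to the shell's PID, so each invocation works in its own temp dir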
p12File=${tlsDir}/${component}.p12
keyStoreFile=${tlsDir}/${component}.keystore.jks
trustStoreFile=${tlsDir}/${component}.truststore.jks
# create tmp dir for keystore and truststore files
mkdir ${tlsDir}
chmod 0700 ${tlsDir}
function checkFile() {
local file=$1
if [ ! -f ${file} ]; then
echo "${file} is not found"
return 1
fi
local len=$(wc -c ${file} | awk '{print $1}')
echo "processing ${file} : len = ${len}"
if [ $len -le 0 ]; then
echo "${file} is empty"
return 1
fi
}
function ensureFileNotEmpty() {
local file=$1
until checkFile ${file}; do
echo "file isn't initialized yet ... check in 3 seconds ..." && sleep 3;
done;
}
ensureFileNotEmpty ${crtFile}
ensureFileNotEmpty ${keyFile}
ensureFileNotEmpty ${caFile}
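# generate a random throwaway password (24 base64 chars) used for both stores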
PASSWORD=$(head /dev/urandom | base64 | head -c 24)
openssl pkcs12 \
-export \
-in ${crtFile} \
-inkey ${keyFile} \
-out ${p12File} \
-name ${name} \
-passout "pass:${PASSWORD}"
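# convert the PKCS12 bundle into a JKS keystore, then import the CA into a separate JKS truststore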
keytool -importkeystore \
-srckeystore ${p12File} \
-srcstoretype PKCS12 -srcstorepass "${PASSWORD}" \
-alias ${name} \
-destkeystore ${keyStoreFile} \
-deststorepass "${PASSWORD}"
keytool -import \
-file ${caFile} \
-storetype JKS \
-alias ${name} \
-keystore ${trustStoreFile} \
-storepass "${PASSWORD}" \
-trustcacerts -noprompt
ensureFileNotEmpty ${keyStoreFile}
ensureFileNotEmpty ${trustStoreFile}
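# Client mode (a component connecting out to ZooKeeper) also needs the Netty client
# socket and zookeeper.client.secure flags; server mode only sets the store locations.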
if [[ "x${isClient}" == "xtrue" ]]; then
echo $'\n' >> conf/pulsar_env.sh
echo "PULSAR_EXTRA_OPTS=\"\${PULSAR_EXTRA_OPTS} -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=${keyStoreFile} -Dzookeeper.ssl.keyStore.password=${PASSWORD} -Dzookeeper.ssl.trustStore.location=${trustStoreFile} -Dzookeeper.ssl.trustStore.password=${PASSWORD}\"" >> conf/pulsar_env.sh
echo $'\n' >> conf/bkenv.sh
echo "BOOKIE_EXTRA_OPTS=\"\${BOOKIE_EXTRA_OPTS} -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=${keyStoreFile} -Dzookeeper.ssl.keyStore.password=${PASSWORD} -Dzookeeper.ssl.trustStore.location=${trustStoreFile} -Dzookeeper.ssl.trustStore.password=${PASSWORD}\"" >> conf/bkenv.sh
else
echo $'\n' >> conf/pulsar_env.sh
echo "PULSAR_EXTRA_OPTS=\"\${PULSAR_EXTRA_OPTS} -Dzookeeper.ssl.keyStore.location=${keyStoreFile} -Dzookeeper.ssl.keyStore.password=${PASSWORD} -Dzookeeper.ssl.trustStore.location=${trustStoreFile} -Dzookeeper.ssl.trustStore.password=${PASSWORD}\"" >> conf/pulsar_env.sh
fi
{{- end }}
View File
@ -16,7 +16,7 @@
# specific language governing permissions and limitations # specific language governing permissions and limitations
# under the License. # under the License.
# #
{{- if and .Values.components.oxia (not .Values.oxia.coordinator.customConfigMapName) }} {{- if .Values.components.oxia }}
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
@ -29,4 +29,4 @@ data:
config.yaml: | config.yaml: |
{{- include "oxia.coordinator.config.yaml" . | nindent 4 }} {{- include "oxia.coordinator.config.yaml" . | nindent 4 }}
{{- end }} {{- end }}
View File
@ -26,7 +26,6 @@ metadata:
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.oxia.component }}-coordinator component: {{ .Values.oxia.component }}-coordinator
annotations: {{ .Values.oxia.coordinator.appAnnotations | toYaml | nindent 4 }}
spec: spec:
replicas: 1 replicas: 1
selector: selector:
@ -41,32 +40,23 @@ spec:
{{- include "pulsar.template.labels" . | nindent 8 }} {{- include "pulsar.template.labels" . | nindent 8 }}
component: {{ .Values.oxia.component }}-coordinator component: {{ .Values.oxia.component }}-coordinator
annotations: annotations:
{{- if not .Values.oxia.coordinator.podMonitor.enabled }}
prometheus.io/scrape: "true" prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.oxia.coordinator.ports.metrics }}" prometheus.io/port: "{{ .Values.oxia.coordinator.ports.metrics }}"
{{- end }}
{{- with .Values.oxia.coordinator.annotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec: spec:
{{- if .Values.oxia.coordinator.nodeSelector }} {{- if .Values.oxia.server.nodeSelector }}
nodeSelector: nodeSelector:
{{ toYaml .Values.oxia.coordinator.nodeSelector | indent 8 }} {{ toYaml .Values.oxia.server.nodeSelector | indent 8 }}
{{- end }} {{- end }}
{{- if .Values.oxia.coordinator.tolerations }} {{- if .Values.oxia.server.tolerations }}
tolerations: tolerations:
{{ toYaml .Values.oxia.coordinator.tolerations | indent 8 }} {{ toYaml .Values.oxia.server.tolerations | indent 8 }}
{{- end }} {{- end }}
serviceAccountName: {{ template "pulsar.fullname" . }}-{{ .Values.oxia.component }}-coordinator serviceAccountName: {{ template "pulsar.fullname" . }}-{{ .Values.oxia.component }}-coordinator
containers: containers:
- command: - command:
{{- if .Values.oxia.coordinator.entrypoint }}
{{ toYaml .Values.oxia.coordinator.entrypoint | indent 12 }}
{{- else }}
{{- include "oxia.coordinator.entrypoint" . | nindent 12 }} {{- include "oxia.coordinator.entrypoint" . | nindent 12 }}
{{- end }}
image: "{{ .Values.images.oxia.repository }}:{{ .Values.images.oxia.tag }}" image: "{{ .Values.images.oxia.repository }}:{{ .Values.images.oxia.tag }}"
imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.oxia "root" .) }}" imagePullPolicy: {{ .Values.images.oxia.pullPolicy }}
name: coordinator name: coordinator
ports: ports:
{{- range $key, $value := .Values.oxia.coordinator.ports }} {{- range $key, $value := .Values.oxia.coordinator.ports }}
@ -77,19 +67,8 @@ spec:
limits: limits:
cpu: {{ .Values.oxia.coordinator.cpuLimit }} cpu: {{ .Values.oxia.coordinator.cpuLimit }}
memory: {{ .Values.oxia.coordinator.memoryLimit }} memory: {{ .Values.oxia.coordinator.memoryLimit }}
{{- if .Values.oxia.coordinator.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.oxia.coordinator.extraVolumeMounts | nindent 12 }}
{{- end }}
livenessProbe: livenessProbe:
{{- include "oxia-cluster.probe" .Values.oxia.coordinator.ports.internal | nindent 12 }} {{- include "oxia-cluster.probe" .Values.oxia.coordinator.ports.internal | nindent 12 }}
readinessProbe: readinessProbe:
{{- include "oxia-cluster.probe" .Values.oxia.coordinator.ports.internal | nindent 12 }} {{- include "oxia-cluster.probe" .Values.oxia.coordinator.ports.internal | nindent 12 }}
{{- if .Values.oxia.coordinator.extraContainers }} {{- end }}
{{- toYaml .Values.oxia.coordinator.extraContainers | nindent 8 }}
{{- end }}
{{- if .Values.oxia.coordinator.extraVolumes }}
volumes:
{{- toYaml .Values.oxia.coordinator.extraVolumes | nindent 8 }}
{{- end }}
{{- end }}
View File
@ -17,7 +17,42 @@
# under the License. # under the License.
# #
# deploy oxia-coordinator PodMonitor only when `$.Values.oxia.coordinator.podMonitor.enabled` is true # deploy oxia-coordinator PodMonitor only when `$.Values.oxia.podMonitor.enabled` is true
{{- if and $.Values.components.oxia $.Values.oxia.coordinator.podMonitor.enabled }} {{- if and $.Values.components.oxia $.Values.oxia.coordinator.podMonitor.enabled }}
{{- include "pulsar.podMonitor" (list . "oxia.coordinator" (printf "component: %s-coordinator" .Values.oxia.component) "metrics") }} apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "pulsar.fullname" . }}-oxia-coordinator
labels:
app: {{ template "pulsar.name" . }}
chart: {{ template "pulsar.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
jobLabel: oxia-coordinator
podMetricsEndpoints:
- port: metrics
path: /metrics
scheme: http
interval: {{ $.Values.oxia.coordinator.podMonitor.interval }}
scrapeTimeout: {{ $.Values.oxia.coordinator.podMonitor.scrapeTimeout }}
relabelings:
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_label_component]
action: replace
targetLabel: job
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: kubernetes_pod_name
{{- if $.Values.oxia.coordinator.podMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml $.Values.oxia.coordinator.podMonitor.metricRelabelings | nindent 8 }}
{{- end }}
selector:
matchLabels:
{{- include "pulsar.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: oxia-coordinator
{{- end }} {{- end }}
View File
@ -17,7 +17,42 @@
# under the License. # under the License.
# #
# deploy oxia-server PodMonitor only when `$.Values.oxia.server.podMonitor.enabled` is true # deploy oxia-server PodMonitor only when `$.Values.oxia.podMonitor.enabled` is true
{{- if and $.Values.components.oxia $.Values.oxia.server.podMonitor.enabled }} {{- if and $.Values.components.oxia $.Values.oxia.server.podMonitor.enabled }}
{{- include "pulsar.podMonitor" (list . "oxia.server" (printf "component: %s-server" .Values.oxia.component) "metrics") }} apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "pulsar.fullname" . }}-oxia-server
labels:
app: {{ template "pulsar.name" . }}
chart: {{ template "pulsar.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
jobLabel: oxia-server
podMetricsEndpoints:
- port: metrics
path: /metrics
scheme: http
interval: {{ $.Values.oxia.server.podMonitor.interval }}
scrapeTimeout: {{ $.Values.oxia.server.podMonitor.scrapeTimeout }}
relabelings:
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_label_component]
action: replace
targetLabel: job
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: kubernetes_pod_name
{{- if $.Values.oxia.server.podMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml $.Values.oxia.server.podMonitor.metricRelabelings | nindent 8 }}
{{- end }}
selector:
matchLabels:
{{- include "pulsar.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: oxia-server
{{- end }} {{- end }}
View File
@ -26,7 +26,6 @@ metadata:
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.oxia.component }}-server component: {{ .Values.oxia.component }}-server
annotations: {{ .Values.oxia.server.appAnnotations | toYaml | nindent 4 }}
spec: spec:
replicas: {{ .Values.oxia.server.replicas }} replicas: {{ .Values.oxia.server.replicas }}
selector: selector:
@ -41,13 +40,8 @@ spec:
{{- include "pulsar.template.labels" . | nindent 8 }} {{- include "pulsar.template.labels" . | nindent 8 }}
component: {{ .Values.oxia.component }}-server component: {{ .Values.oxia.component }}-server
annotations: annotations:
{{- if not .Values.oxia.server.podMonitor.enabled }}
prometheus.io/scrape: "true" prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.oxia.server.ports.metrics }}" prometheus.io/port: "{{ .Values.oxia.server.ports.metrics }}"
{{- end }}
{{- with .Values.oxia.server.annotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec: spec:
{{- if .Values.oxia.server.nodeSelector }} {{- if .Values.oxia.server.nodeSelector }}
nodeSelector: nodeSelector:
@ -118,8 +112,8 @@ spec:
{{- if .Values.oxia.pprofEnabled }} {{- if .Values.oxia.pprofEnabled }}
- "--profile" - "--profile"
{{- end}} {{- end}}
image: "{{ .Values.images.oxia.repository }}:{{ .Values.images.oxia.tag }}" image: "{{ .Values.images.oxia.repository }}:{{ .Values.images.oxia.tag | default .Chart.AppVersion }}"
imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.oxia "root" .) }}" imagePullPolicy: {{ .Values.images.oxia.pullPolicy }}
name: server name: server
ports: ports:
{{- range $key, $value := .Values.oxia.server.ports }} {{- range $key, $value := .Values.oxia.server.ports }}
@ -150,4 +144,4 @@ spec:
resources: resources:
requests: requests:
storage: {{ .Values.oxia.server.storageSize }} storage: {{ .Values.oxia.server.storageSize }}
{{- end}} {{- end}}
View File
@ -42,14 +42,14 @@ data:
webServicePortTls: "{{ .Values.proxy.ports.containerPorts.https }}" webServicePortTls: "{{ .Values.proxy.ports.containerPorts.https }}"
tlsCertificateFilePath: "/pulsar/certs/proxy/tls.crt" tlsCertificateFilePath: "/pulsar/certs/proxy/tls.crt"
tlsKeyFilePath: "/pulsar/certs/proxy/tls.key" tlsKeyFilePath: "/pulsar/certs/proxy/tls.key"
tlsTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.proxy.cacerts.enabled | quote }} tlsTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
{{- if and .Values.tls.enabled .Values.tls.broker.enabled }} {{- if and .Values.tls.enabled .Values.tls.broker.enabled }}
# if broker enables TLS, configure proxy to talk to broker using TLS # if broker enables TLS, configure proxy to talk to broker using TLS
brokerServiceURLTLS: pulsar+ssl://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.pulsarssl }} brokerServiceURLTLS: pulsar+ssl://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.pulsarssl }}
brokerWebServiceURLTLS: https://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.https }} brokerWebServiceURLTLS: https://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.https }}
tlsEnabledWithBroker: "true" tlsEnabledWithBroker: "true"
tlsCertRefreshCheckDurationSec: "300" tlsCertRefreshCheckDurationSec: "300"
brokerClientTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.proxy.cacerts.enabled | quote }} brokerClientTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
{{- end }} {{- end }}
{{- if not (and .Values.tls.enabled .Values.tls.broker.enabled) }} {{- if not (and .Values.tls.enabled .Values.tls.broker.enabled) }}
brokerServiceURL: pulsar://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.pulsar }} brokerServiceURL: pulsar://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.pulsar }}
@ -70,14 +70,9 @@ data:
superUserRoles: {{ .Values.auth.superUsers | values | compact | sortAlpha | join "," }} superUserRoles: {{ .Values.auth.superUsers | values | compact | sortAlpha | join "," }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
# token authentication configuration # token authentication configuration
{{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled .Values.auth.authentication.openid.enabled }}
authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken,org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID"
{{- end }}
{{- if and .Values.auth.authentication.enabled .Values.auth.authentication.jwt.enabled ( not .Values.auth.authentication.openid.enabled ) }}
authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken" authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken"
{{- end }}
brokerClientAuthenticationParameters: "file:///pulsar/tokens/proxy/token" brokerClientAuthenticationParameters: "file:///pulsar/tokens/proxy/token"
brokerClientAuthenticationPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken" brokerClientAuthenticationPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
{{- if .Values.auth.authentication.jwt.usingSecretKey }} {{- if .Values.auth.authentication.jwt.usingSecretKey }}
@ -86,25 +81,6 @@ data:
tokenPublicKey: "file:///pulsar/keys/token/public.key" tokenPublicKey: "file:///pulsar/keys/token/public.key"
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if and .Values.auth.authentication.enabled .Values.auth.authentication.openid.enabled }}
# openid authentication configuration
{{- if and .Values.auth.authentication.enabled .Values.auth.authentication.openid.enabled ( not .Values.auth.authentication.jwt.enabled ) }}
authenticationProviders: "org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID"
{{- end }}
PULSAR_PREFIX_openIDAllowedTokenIssuers: {{ .Values.auth.authentication.openid.openIDAllowedTokenIssuers | uniq | compact | sortAlpha | join "," | quote }}
PULSAR_PREFIX_openIDAllowedAudiences: {{ .Values.auth.authentication.openid.openIDAllowedAudiences | uniq | compact | sortAlpha | join "," | quote }}
PULSAR_PREFIX_openIDTokenIssuerTrustCertsFilePath: {{ .Values.auth.authentication.openid.openIDTokenIssuerTrustCertsFilePath | quote }}
PULSAR_PREFIX_openIDRoleClaim: {{ .Values.auth.authentication.openid.openIDRoleClaim | quote }}
PULSAR_PREFIX_openIDAcceptedTimeLeewaySeconds: {{ .Values.auth.authentication.openid.openIDAcceptedTimeLeewaySeconds | quote }}
PULSAR_PREFIX_openIDCacheSize: {{ .Values.auth.authentication.openid.openIDCacheSize | quote }}
PULSAR_PREFIX_openIDCacheRefreshAfterWriteSeconds: {{ .Values.auth.authentication.openid.openIDCacheRefreshAfterWriteSeconds | quote }}
PULSAR_PREFIX_openIDCacheExpirationSeconds: {{ .Values.auth.authentication.openid.openIDCacheExpirationSeconds | quote }}
PULSAR_PREFIX_openIDHttpConnectionTimeoutMillis: {{ .Values.auth.authentication.openid.openIDHttpConnectionTimeoutMillis | quote }}
PULSAR_PREFIX_openIDHttpReadTimeoutMillis: {{ .Values.auth.authentication.openid.openIDHttpReadTimeoutMillis | quote }}
PULSAR_PREFIX_openIDKeyIdCacheMissRefreshSeconds: {{ .Values.auth.authentication.openid.openIDKeyIdCacheMissRefreshSeconds | quote }}
PULSAR_PREFIX_openIDRequireIssuersUseHttps: {{ .Values.auth.authentication.openid.openIDRequireIssuersUseHttps | quote }}
PULSAR_PREFIX_openIDFallbackDiscoveryMode: {{ .Values.auth.authentication.openid.openIDFallbackDiscoveryMode | quote }}
{{- end }}
{{- end }} {{- end }}
{{ toYaml .Values.proxy.configData | indent 2 }} {{ toYaml .Values.proxy.configData | indent 2 }}
{{- end }} {{- end }}
View File
@ -27,8 +27,6 @@ kind: HorizontalPodAutoscaler
metadata: metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
spec: spec:
maxReplicas: {{ .Values.proxy.autoscaling.maxReplicas }} maxReplicas: {{ .Values.proxy.autoscaling.maxReplicas }}
{{- with .Values.proxy.autoscaling.metrics }} {{- with .Values.proxy.autoscaling.metrics }}
View File
@ -19,5 +19,40 @@
# deploy proxy PodMonitor only when `$.Values.proxy.podMonitor.enabled` is true # deploy proxy PodMonitor only when `$.Values.proxy.podMonitor.enabled` is true
{{- if $.Values.proxy.podMonitor.enabled }} {{- if $.Values.proxy.podMonitor.enabled }}
{{- include "pulsar.podMonitor" (list . "proxy" (printf "component: %s" .Values.proxy.component) "sts-http") }} apiVersion: monitoring.coreos.com/v1
{{- end }} kind: PodMonitor
metadata:
name: {{ template "pulsar.fullname" . }}-proxy
labels:
app: {{ template "pulsar.name" . }}
chart: {{ template "pulsar.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
jobLabel: proxy
podMetricsEndpoints:
- port: http
path: /metrics
scheme: http
interval: {{ $.Values.proxy.podMonitor.interval }}
scrapeTimeout: {{ $.Values.proxy.podMonitor.scrapeTimeout }}
relabelings:
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_label_component]
action: replace
targetLabel: job
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: kubernetes_pod_name
{{- if $.Values.proxy.podMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml $.Values.proxy.podMonitor.metricRelabelings | nindent 8 }}
{{- end }}
selector:
matchLabels:
{{- include "pulsar.matchLabels" . | nindent 6 }}
component: proxy
{{- end }}
View File
@ -0,0 +1,85 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
namespace: {{ template "pulsar.namespace" . }}
rules:
- apiGroups:
- policy
resourceNames:
- "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
namespace: {{ template "pulsar.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
subjects:
- kind: ServiceAccount
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
namespace: {{ template "pulsar.namespace" . }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
{{- if .Values.rbac.limit_to_namespace }}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}-{{ template "pulsar.namespace" . }}"
{{- else}}
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
{{- end}}
spec:
readOnlyRootFilesystem: false
privileged: false
allowPrivilegeEscalation: false
runAsUser:
rule: 'RunAsAny'
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
{{- end}}
View File
@ -23,7 +23,6 @@ kind: StatefulSet
metadata: metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
annotations: {{ .Values.proxy.appAnnotations | toYaml | nindent 4 }}
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.proxy.component }} component: {{ .Values.proxy.component }}
@ -45,10 +44,8 @@ spec:
{{- include "pulsar.template.labels" . | nindent 8 }} {{- include "pulsar.template.labels" . | nindent 8 }}
component: {{ .Values.proxy.component }} component: {{ .Values.proxy.component }}
annotations: annotations:
{{- if not .Values.proxy.podMonitor.enabled }}
prometheus.io/scrape: "true" prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.proxy.ports.containerPorts.http }}" prometheus.io/port: "{{ .Values.proxy.ports.containerPorts.http }}"
{{- end }}
{{- if .Values.proxy.restartPodsOnConfigMapChange }} {{- if .Values.proxy.restartPodsOnConfigMapChange }}
checksum/config: {{ include (print $.Template.BasePath "/proxy-configmap.yaml") . | sha256sum }} checksum/config: {{ include (print $.Template.BasePath "/proxy-configmap.yaml") . | sha256sum }}
{{- end }} {{- end }}
@ -112,18 +109,6 @@ spec:
terminationGracePeriodSeconds: {{ .Values.proxy.gracePeriod }} terminationGracePeriodSeconds: {{ .Values.proxy.gracePeriod }}
serviceAccountName: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}" serviceAccountName: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
initContainers: initContainers:
{{- if .Values.tls.proxy.cacerts.enabled }}
- name: combine-certs
image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.proxy "root" .) }}"
imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.proxy "root" .) }}"
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["sh", "-c"]
args:
- |
bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.proxy.cacerts.certs) }}
volumeMounts:
{{- include "pulsar.proxy.certs.volumeMounts" . | nindent 8 }}
{{- end }}
{{- if and .Values.components.zookeeper .Values.proxy.waitZookeeperTimeout (gt (.Values.proxy.waitZookeeperTimeout | int) 0) }} {{- if and .Values.components.zookeeper .Values.proxy.waitZookeeperTimeout (gt (.Values.proxy.waitZookeeperTimeout | int) 0) }}
# This init container will wait for zookeeper to be ready before # This init container will wait for zookeeper to be ready before
# deploying the proxy # deploying the proxy
@ -133,19 +118,15 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.proxy.waitZookeeperTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.proxy.waitZookeeperTimeout }}", "sh", "-c"]
args: args:
- | - >-
export PULSAR_MEM="-Xmx128M"; export PULSAR_MEM="-Xmx128M";
{{- if $zk:=.Values.pulsar_metadata.userProvidedZookeepers }} {{- if $zk:=.Values.pulsar_metadata.userProvidedZookeepers }}
until timeout 15 bin/pulsar zookeeper-shell -server {{ $zk }} ls {{ or .Values.metadataPrefix "/" }}; do until timeout 15 bin/pulsar zookeeper-shell -server {{ $zk }} ls {{ or .Values.metadataPrefix "/" }}; do
echo "user provided zookeepers {{ $zk }} are unreachable... check in 3 seconds ..." && sleep 3; echo "user provided zookeepers {{ $zk }} are unreachable... check in 3 seconds ..." && sleep 3;
done;
{{- else if .Values.pulsar_metadata.configurationStore }}
until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.configurationStore.service" . }} get {{ .Values.pulsar_metadata.configurationStoreMetadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do
echo "pulsar cluster {{ template "pulsar.cluster.name" . }} isn't initialized yet ... check in 3 seconds ..." && sleep 3;
done; done;
{{- else }} {{ else }}
until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.zookeeper.service" . }} get {{ .Values.metadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do until timeout 15 bin/pulsar zookeeper-shell -server {{ template "pulsar.configurationStore.service" . }} get {{ .Values.metadataPrefix }}/admin/clusters/{{ template "pulsar.cluster.name" . }}; do
echo "pulsar cluster {{ template "pulsar.cluster.name" . }} isn't initialized yet ... check in 3 seconds ..." && sleep 3; sleep 3;
done; done;
{{- end}} {{- end}}
{{- end}} {{- end}}
@ -156,7 +137,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.proxy.waitOxiaTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.proxy.waitOxiaTimeout }}", "sh", "-c"]
args: args:
- | - >-
until nslookup {{ template "pulsar.oxia.server.service" . }}; do until nslookup {{ template "pulsar.oxia.server.service" . }}; do
sleep 3; sleep 3;
done; done;
@ -170,7 +151,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.proxy.waitBrokerTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.proxy.waitBrokerTimeout }}", "sh", "-c"]
args: args:
- | - >-
set -e; set -e;
brokerServiceNumber="$(nslookup -timeout=10 {{ template "pulsar.fullname" . }}-{{ .Values.broker.component }} | grep Name | wc -l)"; brokerServiceNumber="$(nslookup -timeout=10 {{ template "pulsar.fullname" . }}-{{ .Values.broker.component }} | grep Name | wc -l)";
until [ ${brokerServiceNumber} -ge 1 ]; do until [ ${brokerServiceNumber} -ge 1 ]; do
@ -222,15 +203,10 @@ spec:
{{- end }} {{- end }}
command: ["sh", "-c"] command: ["sh", "-c"]
args: args:
- | - >
{{- if .Values.proxy.additionalCommand }} {{- if .Values.proxy.additionalCommand }}
{{ .Values.proxy.additionalCommand }} {{ .Values.proxy.additionalCommand }}
{{- end }} {{- end }}
{{- if .Values.tls.proxy.cacerts.enabled }}
cd /pulsar/certs/cacerts;
nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.proxy.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
cd /pulsar;
{{- end }}
bin/apply-config-from-env.py conf/proxy.conf && bin/apply-config-from-env.py conf/proxy.conf &&
echo "OK" > "${statusFilePath:-status}" && echo "OK" > "${statusFilePath:-status}" &&
OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar proxy OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar proxy
@ -248,6 +224,10 @@ spec:
- name: "sts-{{ .Values.tlsPrefix }}pulsarssl" - name: "sts-{{ .Values.tlsPrefix }}pulsarssl"
containerPort: {{ .Values.proxy.ports.pulsarssl }} containerPort: {{ .Values.proxy.ports.pulsarssl }}
{{- end }} {{- end }}
{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
securityContext:
readOnlyRootFilesystem: false
{{- end }}
{{- if .Values.proxy.extraEnvs }} {{- if .Values.proxy.extraEnvs }}
env: env:
{{ toYaml .Values.proxy.extraEnvs | indent 8 }} {{ toYaml .Values.proxy.extraEnvs | indent 8 }}
@ -258,7 +238,7 @@ spec:
{{- if or .Values.proxy.extraVolumeMounts .Values.auth.authentication.enabled (and .Values.tls.enabled (or .Values.tls.proxy.enabled .Values.tls.broker.enabled)) }} {{- if or .Values.proxy.extraVolumeMounts .Values.auth.authentication.enabled (and .Values.tls.enabled (or .Values.tls.proxy.enabled .Values.tls.broker.enabled)) }}
volumeMounts: volumeMounts:
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- mountPath: "/pulsar/keys" - mountPath: "/pulsar/keys"
name: token-keys name: token-keys
readOnly: true readOnly: true
@ -267,7 +247,16 @@ spec:
readOnly: true readOnly: true
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- include "pulsar.proxy.certs.volumeMounts" . | nindent 10 }} {{- if .Values.tls.proxy.enabled }}
- mountPath: "/pulsar/certs/proxy"
name: proxy-certs
readOnly: true
{{- end}}
{{- if .Values.tls.enabled }}
- mountPath: "/pulsar/certs/ca"
name: ca
readOnly: true
{{- end}}
{{- if .Values.proxy.extraVolumeMounts }} {{- if .Values.proxy.extraVolumeMounts }}
{{ toYaml .Values.proxy.extraVolumeMounts | indent 10 }} {{ toYaml .Values.proxy.extraVolumeMounts | indent 10 }}
{{- end }} {{- end }}
@ -279,7 +268,7 @@ spec:
{{ toYaml .Values.proxy.extraVolumes | indent 8 }} {{ toYaml .Values.proxy.extraVolumes | indent 8 }}
{{- end }} {{- end }}
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- name: token-keys - name: token-keys
secret: secret:
{{- if not .Values.auth.authentication.jwt.usingSecretKey }} {{- if not .Values.auth.authentication.jwt.usingSecretKey }}
@ -304,6 +293,26 @@ spec:
path: proxy/token path: proxy/token
{{- end}} {{- end}}
{{- end}} {{- end}}
{{- include "pulsar.proxy.certs.volumes" . | nindent 8 }} {{- if .Values.tls.proxy.enabled }}
- name: ca
secret:
{{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
{{- end }}
{{- if eq .Values.certs.internal_issuer.type "ca" }}
secretName: "{{ .Values.certs.issuers.ca.secretName }}"
{{- end }}
items:
- key: ca.crt
path: ca.crt
- name: proxy-certs
secret:
secretName: "{{ .Release.Name }}-{{ .Values.tls.proxy.cert_name }}"
items:
- key: tls.crt
path: tls.crt
- key: tls.key
path: tls.key
{{- end}}
{{- end}} {{- end}}
{{- end }} {{- end }}
View File
@ -34,10 +34,6 @@ spec:
ttlSecondsAfterFinished: {{ .Values.job.ttl.secondsAfterFinished | default 600 }} ttlSecondsAfterFinished: {{ .Values.job.ttl.secondsAfterFinished | default 600 }}
{{- end }} {{- end }}
template: template:
metadata:
labels:
{{- include "pulsar.template.labels" . | nindent 8 }}
component: {{ .Values.pulsar_metadata.component }}
spec: spec:
{{- include "pulsar.imagePullSecrets" . | nindent 6 }} {{- include "pulsar.imagePullSecrets" . | nindent 6 }}
{{- if .Values.pulsar_metadata.nodeSelector }} {{- if .Values.pulsar_metadata.nodeSelector }}
@ -45,18 +41,6 @@ spec:
{{ toYaml .Values.pulsar_metadata.nodeSelector | indent 8 }} {{ toYaml .Values.pulsar_metadata.nodeSelector | indent 8 }}
{{- end }} {{- end }}
initContainers: initContainers:
{{- if .Values.tls.toolset.cacerts.enabled }}
- name: cacerts
image: "{{ template "pulsar.imageFullName" (dict "image" .Values.pulsar_metadata.image "root" .) }}"
imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.pulsar_metadata.image "root" .) }}"
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["sh", "-c"]
args:
- |
bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.toolset.cacerts.certs) }}
volumeMounts:
{{- include "pulsar.toolset.certs.volumeMounts" . | nindent 8 }}
{{- end }}
{{- if and .Values.components.zookeeper .Values.pulsar_metadata.waitZookeeperTimeout (gt (.Values.pulsar_metadata.waitZookeeperTimeout | int) 0) }} {{- if and .Values.components.zookeeper .Values.pulsar_metadata.waitZookeeperTimeout (gt (.Values.pulsar_metadata.waitZookeeperTimeout | int) 0) }}
{{- if .Values.pulsar_metadata.configurationStore }} {{- if .Values.pulsar_metadata.configurationStore }}
- name: wait-zk-cs-ready - name: wait-zk-cs-ready
@ -65,7 +49,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.pulsar_metadata.waitZookeeperTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.pulsar_metadata.waitZookeeperTimeout }}", "sh", "-c"]
args: args:
- | - >-
until nslookup {{ .Values.pulsar_metadata.configurationStore}}; do until nslookup {{ .Values.pulsar_metadata.configurationStore}}; do
sleep 3; sleep 3;
done; done;
@ -76,7 +60,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.pulsar_metadata.waitZookeeperTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.pulsar_metadata.waitZookeeperTimeout }}", "sh", "-c"]
args: args:
- | - >-
{{- if $zk := .Values.pulsar_metadata.userProvidedZookeepers }} {{- if $zk := .Values.pulsar_metadata.userProvidedZookeepers }}
export PULSAR_MEM="-Xmx128M"; export PULSAR_MEM="-Xmx128M";
until timeout 15 bin/pulsar zookeeper-shell -server {{ $zk }} ls {{ or .Values.metadataPrefix "/" }}; do until timeout 15 bin/pulsar zookeeper-shell -server {{ $zk }} ls {{ or .Values.metadataPrefix "/" }}; do
@ -95,7 +79,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.pulsar_metadata.waitOxiaTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.pulsar_metadata.waitOxiaTimeout }}", "sh", "-c"]
args: args:
- | - >-
until nslookup {{ template "pulsar.oxia.server.service" . }}; do until nslookup {{ template "pulsar.oxia.server.service" . }}; do
sleep 3; sleep 3;
done; done;
@ -109,7 +93,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 10 }} resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
command: ["timeout", "{{ .Values.pulsar_metadata.waitBookkeeperTimeout }}", "sh", "-c"] command: ["timeout", "{{ .Values.pulsar_metadata.waitBookkeeperTimeout }}", "sh", "-c"]
args: args:
- | - >-
bin/apply-config-from-env.py conf/bookkeeper.conf; bin/apply-config-from-env.py conf/bookkeeper.conf;
echo Default BOOKIE_MEM settings are set very high, which can cause the init container to fail.; echo Default BOOKIE_MEM settings are set very high, which can cause the init container to fail.;
echo Setting the memory to a lower value to avoid OOM as operations below are not memory intensive.; echo Setting the memory to a lower value to avoid OOM as operations below are not memory intensive.;
@ -135,7 +119,7 @@ spec:
command: ["timeout", "{{ .Values.pulsar_metadata.initTimeout | default 60 }}", "sh", "-c"] command: ["timeout", "{{ .Values.pulsar_metadata.initTimeout | default 60 }}", "sh", "-c"]
{{- if .Values.components.zookeeper }} {{- if .Values.components.zookeeper }}
args: args:
- | # Use the pipe character for the YAML multiline string. Workaround for kubernetes-sigs/kustomize#4201 - >-
{{- include "pulsar.toolset.zookeeper.tls.settings" . | nindent 12 }} {{- include "pulsar.toolset.zookeeper.tls.settings" . | nindent 12 }}
export PULSAR_MEM="-Xmx128M"; export PULSAR_MEM="-Xmx128M";
bin/pulsar initialize-cluster-metadata \ bin/pulsar initialize-cluster-metadata \
@ -155,7 +139,7 @@ spec:
{{- end }} {{- end }}
{{- else if .Values.components.oxia }} {{- else if .Values.components.oxia }}
args: args:
- | # Use the pipe character for the YAML multiline string. Workaround for kubernetes-sigs/kustomize#4201 - >-
export PULSAR_MEM="-Xmx128M"; export PULSAR_MEM="-Xmx128M";
bin/pulsar initialize-cluster-metadata \ bin/pulsar initialize-cluster-metadata \
--cluster {{ template "pulsar.cluster.name" . }} \ --cluster {{ template "pulsar.cluster.name" . }} \
View File
@ -24,8 +24,12 @@ metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.pulsar_manager.component }}-secret" name: "{{ template "pulsar.fullname" . }}-{{ .Values.pulsar_manager.component }}-secret"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} app: {{ template "pulsar.name" . }}
chart: {{ template "pulsar.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.pulsar_manager.component }} component: {{ .Values.pulsar_manager.component }}
cluster: {{ template "pulsar.fullname" . }}
"helm.sh/resource-policy": "keep" # do not remove when uninstalling to keep it for next install "helm.sh/resource-policy": "keep" # do not remove when uninstalling to keep it for next install
type: Opaque type: Opaque
data: data:
View File
@ -32,10 +32,6 @@ spec:
ttlSecondsAfterFinished: {{ .Values.job.ttl.secondsAfterFinished | default 600 }} ttlSecondsAfterFinished: {{ .Values.job.ttl.secondsAfterFinished | default 600 }}
{{- end }} {{- end }}
template: template:
metadata:
labels:
{{- include "pulsar.template.labels" . | nindent 8 }}
component: {{ .Values.pulsar_manager.component }}-init
spec: spec:
{{- include "pulsar.imagePullSecrets" . | nindent 6 }} {{- include "pulsar.imagePullSecrets" . | nindent 6 }}
nodeSelector: nodeSelector:
@ -68,7 +64,7 @@ spec:
resources: {{ toYaml .Values.initContainer.resources | nindent 12 }} resources: {{ toYaml .Values.initContainer.resources | nindent 12 }}
command: [ "sh", "-c" ] command: [ "sh", "-c" ]
args: args:
- | - >-
set -e; set -e;
brokerServiceNumber="$(nslookup -timeout=10 {{ template "pulsar.fullname" . }}-{{ .Values.broker.component }} | grep Name | wc -l)"; brokerServiceNumber="$(nslookup -timeout=10 {{ template "pulsar.fullname" . }}-{{ .Values.broker.component }} | grep Name | wc -l)";
until [ ${brokerServiceNumber} -ge 1 ]; do until [ ${brokerServiceNumber} -ge 1 ]; do
View File
@ -31,7 +31,7 @@ data:
PULSAR_MANAGER_OPTS: "-Dlog4j2.formatMsgNoLookups=true" PULSAR_MANAGER_OPTS: "-Dlog4j2.formatMsgNoLookups=true"
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
# auth # auth
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
{{- if .Values.auth.authentication.jwt.usingSecretKey }} {{- if .Values.auth.authentication.jwt.usingSecretKey }}
SECRET_KEY: "file:///pulsar-manager/keys/token/secret.key" SECRET_KEY: "file:///pulsar-manager/keys/token/secret.key"
{{- else }} {{- else }}
View File
@ -26,10 +26,8 @@ metadata:
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.pulsar_manager.component }} component: {{ .Values.pulsar_manager.component }}
{{- with .Values.pulsar_manager.service.annotations }}
annotations: annotations:
{{ toYaml . | indent 4 }} {{ toYaml .Values.pulsar_manager.service.annotations | indent 4 }}
{{- end }}
spec: spec:
type: {{ .Values.pulsar_manager.service.type }} type: {{ .Values.pulsar_manager.service.type }}
{{- if .Values.pulsar_manager.service.externalTrafficPolicy }} {{- if .Values.pulsar_manager.service.externalTrafficPolicy }}
@ -60,10 +58,8 @@ metadata:
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.pulsar_manager.component }} component: {{ .Values.pulsar_manager.component }}
{{- with .Values.pulsar_manager.adminService.annotations }}
annotations: annotations:
{{ toYaml . | indent 4 }} {{ toYaml .Values.pulsar_manager.adminService.annotations | indent 4 }}
{{- end }}
spec: spec:
type: {{ .Values.pulsar_manager.adminService.type }} type: {{ .Values.pulsar_manager.adminService.type }}
ports: ports:
View File
@ -23,7 +23,6 @@ kind: StatefulSet
metadata: metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.pulsar_manager.component }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.pulsar_manager.component }}"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
annotations: {{ .Values.pulsar_manager.appAnnotations | toYaml | nindent 4 }}
labels: labels:
{{- include "pulsar.standardLabels" . | nindent 4 }} {{- include "pulsar.standardLabels" . | nindent 4 }}
component: {{ .Values.pulsar_manager.component }} component: {{ .Values.pulsar_manager.component }}
@ -82,7 +81,7 @@ spec:
{{ toYaml .Values.pulsar_manager.extraVolumeMounts | indent 10 }} {{ toYaml .Values.pulsar_manager.extraVolumeMounts | indent 10 }}
{{- end }} {{- end }}
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- name: pulsar-manager-keys - name: pulsar-manager-keys
mountPath: /pulsar-manager/keys mountPath: /pulsar-manager/keys
{{- end }} {{- end }}
@ -110,7 +109,7 @@ spec:
{{- end }} {{- end }}
key: DB_PASSWORD key: DB_PASSWORD
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
{{- if .Values.auth.superUsers.manager }} {{- if .Values.auth.superUsers.manager }}
- name: JWT_TOKEN - name: JWT_TOKEN
valueFrom: valueFrom:
@ -126,7 +125,7 @@ spec:
{{ toYaml .Values.pulsar_manager.extraVolumes | indent 8 }} {{ toYaml .Values.pulsar_manager.extraVolumes | indent 8 }}
{{- end }} {{- end }}
{{- if .Values.auth.authentication.enabled }} {{- if .Values.auth.authentication.enabled }}
{{- if .Values.auth.authentication.jwt.enabled }} {{- if eq .Values.auth.authentication.provider "jwt" }}
- name: pulsar-manager-keys - name: pulsar-manager-keys
secret: secret:
defaultMode: 420 defaultMode: 420
View File
@ -24,8 +24,6 @@ kind: Issuer
metadata: metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
spec: spec:
selfSigned: {} selfSigned: {}
--- ---
@ -34,10 +32,8 @@ kind: Certificate
metadata: metadata:
name: "{{ template "pulsar.fullname" . }}-ca" name: "{{ template "pulsar.fullname" . }}-ca"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
spec: spec:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}" secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
commonName: "{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}" commonName: "{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
duration: "{{ .Values.certs.internal_issuer.duration }}" duration: "{{ .Values.certs.internal_issuer.duration }}"
renewBefore: "{{ .Values.certs.internal_issuer.renewBefore }}" renewBefore: "{{ .Values.certs.internal_issuer.renewBefore }}"
@ -54,15 +50,23 @@ spec:
# if you are using an external issuer, change this to that issuer group. # if you are using an external issuer, change this to that issuer group.
group: cert-manager.io group: cert-manager.io
--- ---
{{- end }}
apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}" apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
kind: Issuer kind: Issuer
metadata: metadata:
name: "{{ template "pulsar.certs.issuers.ca.name" . }}" name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
namespace: {{ template "pulsar.namespace" . }} namespace: {{ template "pulsar.namespace" . }}
labels:
{{- include "pulsar.standardLabels" . | nindent 4 }}
spec: spec:
ca: ca:
secretName: "{{ template "pulsar.certs.issuers.ca.secretName" . }}" secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
{{- end }}
{{- if eq .Values.certs.internal_issuer.type "ca" }}
apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
kind: Issuer
metadata:
name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
namespace: {{ template "pulsar.namespace" . }}
spec:
ca:
secretName: "{{ .Values.certs.issuers.ca.secretName }}"
{{- end }}
{{- end }} {{- end }}
View File
@@ -18,30 +18,328 @@
 #
 {{- if .Values.tls.enabled }}
+{{- if .Values.certs.internal_issuer.enabled }}
 {{- if .Values.tls.proxy.enabled }}
 {{- if .Values.tls.proxy.createCert }}
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.proxy "tlsConfig" .Values.tls.proxy) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.proxy.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.proxy.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  # At least one of a DNS Name, USI SAN, or IP address is required.
+  dnsNames:
+{{- if .Values.tls.proxy.dnsNames }}
+{{ toYaml .Values.tls.proxy.dnsNames | indent 4 }}
+{{- end }}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 ---
 {{- end }}
 {{- end }}
 {{- if or .Values.tls.broker.enabled (or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled) }}
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.broker "tlsConfig" .Values.tls.broker) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.broker.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.broker.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  # At least one of a DNS Name, USI SAN, or IP address is required.
+  dnsNames:
+{{- if .Values.tls.broker.dnsNames }}
+{{ toYaml .Values.tls.broker.dnsNames | indent 4 }}
+{{- end}}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 ---
 {{- end }}
 {{- if or .Values.tls.bookie.enabled .Values.tls.zookeeper.enabled }}
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.bookkeeper "tlsConfig" .Values.tls.bookie) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.bookie.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.bookie.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  dnsNames:
+{{- if .Values.tls.bookie.dnsNames }}
+{{ toYaml .Values.tls.bookie.dnsNames | indent 4 }}
+{{- end }}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.bookkeeper.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 ---
 {{- end }}
 {{- if .Values.tls.zookeeper.enabled }}
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.autorecovery "tlsConfig" .Values.tls.autorecovery) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.autorecovery.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.autorecovery.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  dnsNames:
+{{- if .Values.tls.autorecovery.dnsNames }}
+{{ toYaml .Values.tls.autorecovery.dnsNames | indent 4 }}
+{{- end }}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.autorecovery.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 ---
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.toolset "tlsConfig" .Values.tls.toolset) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.toolset.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.toolset.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  dnsNames:
+{{- if .Values.tls.toolset.dnsNames }}
+{{ toYaml .Values.tls.toolset.dnsNames | indent 4 }}
+{{- end }}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 ---
-{{ include "pulsar.cert.template" (dict "root" . "componentConfig" .Values.zookeeper "tlsConfig" .Values.tls.zookeeper) }}
+apiVersion: "{{ .Values.certs.internal_issuer.apiVersion }}"
+kind: Certificate
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.tls.zookeeper.cert_name }}"
+  namespace: {{ template "pulsar.namespace" . }}
+spec:
+  # Secret names are always required.
+  secretName: "{{ .Release.Name }}-{{ .Values.tls.zookeeper.cert_name }}"
+  duration: "{{ .Values.tls.common.duration }}"
+  renewBefore: "{{ .Values.tls.common.renewBefore }}"
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  subject:
+    organizations:
+{{ toYaml .Values.tls.common.organization | indent 4 }}
+{{- else }}
+  organization:
+{{ toYaml .Values.tls.common.organization | indent 2 }}
+{{- end }}
+  # The use of the common name field has been deprecated since 2000 and is
+  # discouraged from being used.
+  commonName: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  isCA: false
+{{- if eq .Values.certs.internal_issuer.apiVersion "cert-manager.io/v1" }}
+  privateKey:
+    size: {{ .Values.tls.common.keySize }}
+    algorithm: {{ .Values.tls.common.keyAlgorithm }}
+    encoding: {{ .Values.tls.common.keyEncoding }}
+{{- else }}
+  keySize: {{ .Values.tls.common.keySize }}
+  keyAlgorithm: {{ .Values.tls.common.keyAlgorithm }}
+  keyEncoding: {{ .Values.tls.common.keyEncoding }}
+{{- end }}
+  usages:
+    - server auth
+    - client auth
+  dnsNames:
+{{- if .Values.tls.zookeeper.dnsNames }}
+{{ toYaml .Values.tls.zookeeper.dnsNames | indent 4 }}
+{{- end }}
+    - "*.{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}.{{ template "pulsar.namespace" . }}.svc.{{ .Values.clusterDomain }}"
+    - "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  # Issuer references are always required.
+  issuerRef:
+    name: "{{ template "pulsar.fullname" . }}-{{ .Values.certs.internal_issuer.component }}-ca-issuer"
+    # We can reference ClusterIssuers by changing the kind here.
+    # The default value is Issuer (i.e. a locally namespaced Issuer)
+    kind: Issuer
+    # This is optional since cert-manager will default to this value however
+    # if you are using an external issuer, change this to that issuer group.
+    group: cert-manager.io
 {{- end }}
 {{- end }}
+{{- end }}
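The conditionals above determine which Certificates render at all. A minimal `values.yaml` sketch that would exercise the proxy and broker paths on the candidate branch, using only the flags visible in this template (the `cert_name` default shown for the proxy is an assumption, since the values diff below only lists the bookie/zookeeper/recovery/toolset names):

```
# Illustrative values fragment; key names come from the conditionals above.
tls:
  enabled: true
  proxy:
    enabled: true
    createCert: true        # gate seen in the proxy block above
    cert_name: tls-proxy    # assumed default, not shown in this diff
  broker:
    enabled: true
certs:
  internal_issuer:
    enabled: true           # required by the candidate's outer gate
    type: selfsigning
```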
@@ -36,7 +36,7 @@ data:
   brokerServiceUrl: "pulsar+ssl://{{ template "pulsar.fullname" . }}-{{ .Values.broker.component }}:{{ .Values.broker.ports.pulsarssl }}/"
   useTls: "true"
   tlsAllowInsecureConnection: "false"
-  tlsTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.toolset.cacerts.enabled | quote }}
+  tlsTrustCertsFilePath: "/pulsar/certs/proxy-ca/ca.crt"
   tlsEnableHostnameVerification: "false"
 {{- end }}
 {{- if not (and .Values.tls.enabled .Values.tls.broker.enabled) }}
@@ -51,7 +51,7 @@ data:
   brokerServiceUrl: "pulsar+ssl://{{ template "pulsar.fullname" . }}-{{ .Values.proxy.component }}:{{ .Values.proxy.ports.pulsarssl }}/"
   useTls: "true"
   tlsAllowInsecureConnection: "false"
-  tlsTrustCertsFilePath: {{ ternary "/pulsar/certs/cacerts/ca-combined.pem" "/pulsar/certs/ca/ca.crt" .Values.tls.toolset.cacerts.enabled | quote }}
+  tlsTrustCertsFilePath: "/pulsar/certs/proxy-ca/ca.crt"
   tlsEnableHostnameVerification: "false"
 {{- end }}
 {{- if not (and .Values.tls.enabled .Values.tls.proxy.enabled) }}
@@ -61,7 +61,7 @@ data:
 {{- end }}
   # Authentication Settings
 {{- if .Values.auth.authentication.enabled }}
-{{- if .Values.auth.authentication.jwt.enabled }}
+{{- if eq .Values.auth.authentication.provider "jwt" }}
   authParams: "file:///pulsar/tokens/client/token"
   authPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
 {{- end }}
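The two branches spell the JWT toggle differently; both shapes appear verbatim in the `values.yaml` diff further down. A side-by-side sketch of the inputs each branch expects:

```
# master: JWT is a boolean toggle under auth.authentication
auth:
  authentication:
    enabled: true
    jwt:
      enabled: true
      usingSecretKey: false

# pulsar-3.9.0-candidate-1: JWT is selected via a provider string
auth:
  authentication:
    enabled: true
    provider: "jwt"
    jwt:
      usingSecretKey: false
```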
@@ -0,0 +1,85 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+subjects:
+- kind: ServiceAccount
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+{{- if .Values.rbac.limit_to_namespace }}
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}-{{ template "pulsar.namespace" . }}"
+{{- else}}
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
+{{- end}}
+spec:
+  readOnlyRootFilesystem: false
+  privileged: false
+  allowPrivilegeEscalation: false
+  runAsUser:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    ranges:
+    - max: 65535
+      min: 1
+    rule: MustRunAs
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+    - min: 1
+      max: 65535
+  seLinux:
+    rule: 'RunAsAny'
+  volumes:
+  - configMap
+  - emptyDir
+  - projected
+  - secret
+  - downwardAPI
+  - persistentVolumeClaim
+{{- end}}
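These PSP objects render only when all three gates hold: the cluster reports a Kubernetes version below 1.25 (where PodSecurityPolicy was removed) and both `rbac` flags are set. The flags come straight from the `rbac` block in the candidate's `values.yaml`, shown later in this diff:

```
# Values fragment enabling the PSP path above (Kubernetes < 1.25 only):
rbac:
  enabled: true
  psp: true
  limit_to_namespace: true
```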
@@ -23,7 +23,6 @@ kind: StatefulSet
 metadata:
   name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
   namespace: {{ template "pulsar.namespace" . }}
-  annotations: {{ .Values.toolset.appAnnotations | toYaml | nindent 4 }}
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.toolset.component }}
@@ -64,20 +63,8 @@ spec:
 {{- end }}
       terminationGracePeriodSeconds: {{ .Values.toolset.gracePeriod }}
       serviceAccountName: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
-      initContainers:
-      {{- if .Values.tls.toolset.cacerts.enabled }}
-        - name: cacerts
-          image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.toolset "root" .) }}"
-          imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.toolset "root" .) }}"
-          resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-          command: ["sh", "-c"]
-          args:
-            - |
-              bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.toolset.cacerts.certs) }}
-          volumeMounts:
-            {{- include "pulsar.toolset.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
       {{- if .Values.toolset.initContainers }}
+      initContainers:
        {{- toYaml .Values.toolset.initContainers | nindent 6 }}
       {{- end }}
       containers:
@@ -95,37 +82,41 @@ spec:
 {{- end }}
         command: ["sh", "-c"]
         args:
-        - |
+        - >
         {{- if .Values.toolset.additionalCommand }}
          {{ .Values.toolset.additionalCommand }}
         {{- end }}
-        {{- if .Values.tls.toolset.cacerts.enabled }}
-         cd /pulsar/certs/cacerts;
-         nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.toolset.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
-         cd /pulsar;
-        {{- end }}
          bin/apply-config-from-env.py conf/client.conf;
          bin/apply-config-from-env.py conf/bookkeeper.conf;
         {{- include "pulsar.toolset.zookeeper.tls.settings" . | nindent 10 }}
          sleep 10000000000
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end }}
         envFrom:
         - configMapRef:
             name: "{{ template "pulsar.fullname" . }}-{{ .Values.toolset.component }}"
         volumeMounts:
       {{- if .Values.auth.authentication.enabled }}
-      {{- if .Values.auth.authentication.jwt.enabled }}
+      {{- if eq .Values.auth.authentication.provider "jwt" }}
         - mountPath: "/pulsar/tokens"
          name: client-token
          readOnly: true
       {{- end }}
       {{- end }}
+      {{- if and .Values.tls.enabled (or .Values.tls.broker.enabled .Values.tls.proxy.enabled) }}
+        - mountPath: "/pulsar/certs/proxy-ca"
+          name: proxy-ca
+          readOnly: true
+      {{- end}}
       {{- if .Values.toolset.extraVolumeMounts }}
{{ toYaml .Values.toolset.extraVolumeMounts | indent 8 }}
       {{- end }}
       {{- include "pulsar.toolset.certs.volumeMounts" . | nindent 8 }}
       volumes:
       {{- if .Values.auth.authentication.enabled }}
-      {{- if .Values.auth.authentication.jwt.enabled }}
+      {{- if eq .Values.auth.authentication.provider "jwt" }}
       - name: client-token
         secret:
           secretName: "{{ .Release.Name }}-token-{{ .Values.auth.superUsers.client }}"
@@ -134,6 +125,19 @@ spec:
             path: client/token
       {{- end}}
       {{- end}}
+      {{- if and .Values.tls.enabled (or .Values.tls.broker.enabled .Values.tls.proxy.enabled) }}
+      - name: proxy-ca
+        secret:
+          {{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
+          secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
+          {{- end }}
+          {{- if eq .Values.certs.internal_issuer.type "ca" }}
+          secretName: "{{ .Values.certs.issuers.ca.secretName }}"
+          {{- end }}
+          items:
+            - key: ca.crt
+              path: ca.crt
+      {{- end}}
       {{- if .Values.toolset.extraVolumes }}
{{ toYaml .Values.toolset.extraVolumes | indent 6 }}
       {{- end }}
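The `proxy-ca` volume above picks its source Secret from `certs.internal_issuer.type`. A sketch of the two configurations it distinguishes (the external secret name is hypothetical):

```
# Option 1: chart-managed self-signed CA
# -> mounts secret "<release>-<tls.ca_suffix>"
certs:
  internal_issuer:
    type: selfsigning

# Option 2: bring-your-own CA secret
# -> mounts the secret named here (illustrative name)
certs:
  internal_issuer:
    type: ca
  issuers:
    ca:
      secretName: my-ca-secret
```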
@@ -20,6 +20,41 @@
 # deploy zookeeper PodMonitor only when `$.Values.zookeeper.podMonitor.enabled` is true
 {{- if .Values.components.zookeeper }}
 {{- if $.Values.zookeeper.podMonitor.enabled }}
-{{- include "pulsar.podMonitor" (list . "zookeeper" (printf "component: %s" .Values.zookeeper.component)) }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: {{ template "pulsar.fullname" . }}-zookeeper
+  labels:
+    app: {{ template "pulsar.name" . }}
+    chart: {{ template "pulsar.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  jobLabel: zookeeper
+  podMetricsEndpoints:
+    - port: http
+      path: /metrics
+      scheme: http
+      interval: {{ $.Values.zookeeper.podMonitor.interval }}
+      scrapeTimeout: {{ $.Values.zookeeper.podMonitor.scrapeTimeout }}
+      relabelings:
+        - action: labelmap
+          regex: __meta_kubernetes_pod_label_(.+)
+        - sourceLabels: [__meta_kubernetes_namespace]
+          action: replace
+          targetLabel: kubernetes_namespace
+        - sourceLabels: [__meta_kubernetes_pod_label_component]
+          action: replace
+          targetLabel: job
+        - sourceLabels: [__meta_kubernetes_pod_name]
+          action: replace
+          targetLabel: kubernetes_pod_name
+      {{- if $.Values.zookeeper.podMonitor.metricRelabelings }}
+      metricRelabelings: {{ toYaml $.Values.zookeeper.podMonitor.metricRelabelings | nindent 8 }}
+      {{- end }}
+  selector:
+    matchLabels:
+      {{- include "pulsar.matchLabels" . | nindent 6 }}
+      component: zookeeper
 {{- end }}
 {{- end }}
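The PodMonitor is driven entirely by the `zookeeper.podMonitor` values block. A tuning sketch using only keys referenced above; the `labeldrop` rule mirrors the commented example in the chart's `values.yaml` and is illustrative, not a default:

```
zookeeper:
  podMonitor:
    enabled: true
    interval: 60s
    scrapeTimeout: 60s
    metricRelabelings:
      - action: labeldrop   # example relabeling, not a chart default
        regex: cluster
```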
@@ -0,0 +1,85 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+{{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+subjects:
+- kind: ServiceAccount
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+  namespace: {{ template "pulsar.namespace" . }}
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+{{- if .Values.rbac.limit_to_namespace }}
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ template "pulsar.namespace" . }}"
+{{- else}}
+  name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
+{{- end}}
+spec:
+  readOnlyRootFilesystem: false
+  privileged: false
+  allowPrivilegeEscalation: false
+  runAsUser:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    ranges:
+    - max: 65535
+      min: 1
+    rule: MustRunAs
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+    - min: 1
+      max: 65535
+  seLinux:
+    rule: 'RunAsAny'
+  volumes:
+  - configMap
+  - emptyDir
+  - projected
+  - secret
+  - downwardAPI
+  - persistentVolumeClaim
+{{- end}}
@@ -28,10 +28,7 @@ metadata:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.zookeeper.component }}
   annotations:
-    {{- with .Values.zookeeper.service.annotations }}
-    annotations:
-      {{ toYaml . | indent 4 }}
-    {{- end }}
+    {{ toYaml .Values.zookeeper.service.annotations | indent 4 }}
 spec:
   ports:
   # prometheus needs to access /metrics endpoint
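Both branches read `zookeeper.service.annotations`; the master side guards the rendering with `{{- with }}` so an empty map emits nothing, while the candidate always pipes the map through `toYaml`. A minimal input sketch (the annotation key/value is illustrative):

```
zookeeper:
  service:
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
```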
@@ -24,7 +24,6 @@ kind: StatefulSet
 metadata:
   name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}"
   namespace: {{ template "pulsar.namespace" . }}
-  annotations: {{ .Values.zookeeper.appAnnotations | toYaml | nindent 4 }}
   labels:
     {{- include "pulsar.standardLabels" . | nindent 4 }}
     component: {{ .Values.zookeeper.component }}
@@ -44,10 +43,6 @@ spec:
         {{- include "pulsar.template.labels" . | nindent 8 }}
         component: {{ .Values.zookeeper.component }}
       annotations:
-        {{- if not .Values.zookeeper.podMonitor.enabled }}
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "{{ .Values.zookeeper.ports.http }}"
-        {{- end }}
         {{- if .Values.zookeeper.restartPodsOnConfigMapChange }}
         checksum/config: {{ include (print $.Template.BasePath "/zookeeper-configmap.yaml") . | sha256sum }}
         {{- end }}
@@ -114,20 +109,8 @@ spec:
       securityContext:
{{ toYaml .Values.zookeeper.securityContext | indent 8 }}
       {{- end }}
-      initContainers:
-      {{- if .Values.tls.zookeeper.cacerts.enabled }}
-        - name: cacerts
-          image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.zookeeper "root" .) }}"
-          imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.zookeeper "root" .) }}"
-          resources: {{ toYaml .Values.initContainer.resources | nindent 10 }}
-          command: ["sh", "-c"]
-          args:
-            - |
-              bin/certs-combine-pem.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.zookeeper.cacerts.certs) }}
-          volumeMounts:
-            {{- include "pulsar.zookeeper.certs.volumeMounts" . | nindent 8 }}
-      {{- end }}
       {{- if .Values.zookeeper.initContainers }}
+      initContainers:
        {{- toYaml .Values.zookeeper.initContainers | nindent 6 }}
       {{- end }}
       containers:
@@ -140,15 +123,10 @@ spec:
 {{- end }}
         command: ["sh", "-c"]
         args:
-        - |
+        - >
        {{- if .Values.zookeeper.additionalCommand }}
          {{ .Values.zookeeper.additionalCommand }}
        {{- end }}
-       {{- if .Values.tls.zookeeper.cacerts.enabled }}
-         cd /pulsar/certs/cacerts;
-         nohup /pulsar/bin/certs-combine-pem-infinity.sh /pulsar/certs/cacerts/ca-combined.pem {{ template "pulsar.certs.cacerts" (dict "certs" .Values.tls.zookeeper.cacerts.certs) }} > /pulsar/certs/cacerts/certs-combine-pem-infinity.log 2>&1 &
-         cd /pulsar;
-       {{- end }}
          bin/apply-config-from-env.py conf/zookeeper.conf;
        {{- include "pulsar.zookeeper.tls.settings" . | nindent 10 }}
          bin/generate-zookeeper-config.sh conf/zookeeper.conf;
@@ -195,6 +173,10 @@ spec:
         {{- $zkConnectCommand = print "nc 127.0.0.1 " .Values.zookeeper.ports.client -}}
         {{- end }}
         {{- if .Values.zookeeper.probe.readiness.enabled }}
+        {{- if and (semverCompare "<1.25-0" .Capabilities.KubeVersion.Version) .Values.rbac.enabled .Values.rbac.psp }}
+        securityContext:
+          readOnlyRootFilesystem: false
+        {{- end}}
         readinessProbe:
           exec:
             command:
@@ -237,7 +219,17 @@ spec:
           - name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ .Values.zookeeper.volumes.datalog.name }}"
             mountPath: /pulsar/data-log
         {{- end }}
-        {{- include "pulsar.zookeeper.certs.volumeMounts" . | nindent 8 }}
+        {{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
+          - mountPath: "/pulsar/certs/zookeeper"
+            name: zookeeper-certs
+            readOnly: true
+          - mountPath: "/pulsar/certs/ca"
+            name: ca
+            readOnly: true
+          - name: keytool
+            mountPath: "/pulsar/keytool/keytool.sh"
+            subPath: keytool.sh
+        {{- end }}
         {{- if .Values.zookeeper.extraVolumeMounts }}
{{ toYaml .Values.zookeeper.extraVolumeMounts | indent 8 }}
         {{- end }}
@@ -246,10 +238,34 @@ spec:
       - name: "{{ template "pulsar.fullname" . }}-{{ .Values.zookeeper.component }}-{{ .Values.zookeeper.volumes.data.name }}"
         emptyDir: {}
 {{- end }}
-{{- include "pulsar.zookeeper.certs.volumes" . | nindent 6 }}
 {{- if .Values.zookeeper.extraVolumes }}
{{ toYaml .Values.zookeeper.extraVolumes | indent 6 }}
 {{- end }}
+{{- if and .Values.tls.enabled .Values.tls.zookeeper.enabled }}
+      - name: zookeeper-certs
+        secret:
+          secretName: "{{ .Release.Name }}-{{ .Values.tls.zookeeper.cert_name }}"
+          items:
+            - key: tls.crt
+              path: tls.crt
+            - key: tls.key
+              path: tls.key
+      - name: ca
+        secret:
+          {{- if eq .Values.certs.internal_issuer.type "selfsigning" }}
+          secretName: "{{ .Release.Name }}-{{ .Values.tls.ca_suffix }}"
+          {{- end }}
+          {{- if eq .Values.certs.internal_issuer.type "ca" }}
+          secretName: "{{ .Values.certs.issuers.ca.secretName }}"
+          {{- end }}
+          items:
+            - key: ca.crt
+              path: ca.crt
+      - name: keytool
+        configMap:
+          name: "{{ template "pulsar.fullname" . }}-keytool-configmap"
+          defaultMode: 0755
+{{- end}}
 {{- include "pulsar.imagePullSecrets" . | nindent 6}}
 {{- if and (and .Values.persistence .Values.volumes.persistence) .Values.zookeeper.volumes.persistence }}
       volumeClaimTemplates:
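The ZooKeeper TLS mounts added above consult the following values; the `cert_name` default matches the one shown in the `values.yaml` diff below, while the choice between the two CA secrets again follows `certs.internal_issuer.type`:

```
# Values fragment exercising the zookeeper-certs / ca / keytool volumes:
tls:
  enabled: true
  zookeeper:
    enabled: true
    cert_name: tls-zookeeper
certs:
  internal_issuer:
    type: selfsigning   # or "ca" with certs.issuers.ca.secretName set
```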
@@ -21,12 +21,9 @@
 ### K8S Settings
 ###
-### Namespace to deploy Pulsar
-### Note: Prefer using helm's --namespace flag with --create-namespace instead
-## The namespace to use to deploy the Pulsar components. If left empty,
-## it will default to .Release.Namespace (aka helm --namespace).
-## Please note that victoria-metrics-k8s-stack might not be able to scrape Pulsar component metrics by default unless
-## it is deployed in the same namespace as Pulsar.
+### Namespace to deploy pulsar
+# The namespace to use to deploy the pulsar components, if left empty
+# will default to .Release.Namespace (aka helm --namespace).
 namespace: ""
 namespaceCreate: false
@@ -38,7 +35,6 @@ clusterDomain: cluster.local
 ###
 ## Set to true on install
-## There's no need to set this value unless you're using a system that doesn't track .Release.IsInstall or .Release.IsUpgrade (like argocd)
 initialize: false
 ## Set useReleaseStatus to false if you're deploying this chart using a system that doesn't track .Release.IsInstall or .Release.IsUpgrade (like argocd)
 useReleaseStatus: true
@@ -94,8 +90,10 @@ volumes:
 rbac:
   enabled: false
+  psp: false
   limit_to_namespace: true
 ## AntiAffinity
 ##
 ## Flag to enable and disable `AntiAffinity` for all components.
@@ -103,8 +101,6 @@ rbac:
 ## If you need to disable AntiAffinity for a component, you can set
 ## the `affinity.anti_affinity` settings to `false` for that component.
 affinity:
-  ## When set to true, the scheduler will try to spread pods across different nodes.
-  ## It is necessary to set this to false if you're using a Kubernetes cluster with less than 3 nodes, such as local development environments.
   anti_affinity: true
   # Set the anti affinity type. Valid values:
   # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard) requires at least one node per replica
@@ -210,8 +206,8 @@ images:
     hasCommand: false
   oxia:
     repository: streamnative/oxia
-    tag: 0.12.0
-    pullPolicy:
+    tag: 0.11.9
+    pullPolicy: Always
 ## TLS
 ## templates/tls-certs.yaml
@@ -241,13 +237,6 @@ tls:
     # The dnsNames field specifies a list of Subject Alternative Names to be associated with the certificate.
     dnsNames:
     # - example.com
-    cacerts:
-      enabled: false
-      certs:
-      # - name: proxy-cacert
-      #   existingSecret: proxy-cacert
-      #   secretKeys:
-      #   - ca.crt
   # settings for generating certs for broker
   broker:
     enabled: false
@@ -255,96 +244,37 @@ tls:
     # The dnsNames field specifies a list of Subject Alternative Names to be associated with the certificate.
     dnsNames:
     # - example.com
-    cacerts:
-      enabled: false
-      certs:
-      # - name: broker-cacert
-      #   existingSecret: broker-cacert
-      #   secretKeys:
-      #   - ca.crt
   # settings for generating certs for bookies
   bookie:
     enabled: false
     cert_name: tls-bookie
-    cacerts:
-      enabled: false
-      certs:
-      # - name: bookie-cacert
-      #   existingSecret: bookie-cacert
-      #   secretKeys:
-      #   - ca.crt
   # settings for generating certs for zookeeper
   zookeeper:
     enabled: false
     cert_name: tls-zookeeper
-    cacerts:
-      enabled: false
-      certs:
-      # - name: zookeeper-cacert
-      #   existingSecret: zookeeper-cacert
-      #   secretKeys:
-      #   - ca.crt
   # settings for generating certs for recovery
   autorecovery:
     cert_name: tls-recovery
-    cacerts:
-      enabled: false
-      certs:
-      # - name: autorecovery-cacert
-      #   existingSecret: autorecovery-cacert
-      #   secretKeys:
-      #   - ca.crt
   # settings for generating certs for toolset
   toolset:
     cert_name: tls-toolset
-    cacerts:
-      enabled: false
-      certs:
-      # - name: toolset-cacert
-      #   existingSecret: toolset-cacert
-      #   secretKeys:
-      #   - ca.crt
   # TLS setting for function runtime instance
   function_instance:
     # controls the use of TLS for function runtime connections towards brokers
     enabled: false
   oxia:
     enabled: false
-  pulsar_metadata:
-    cacerts:
-      enabled: false
-      certs:
-      # - name: pulsar-metadata-cacert
-      #   existingSecret: pulsar-metadata-cacert
-      #   secretKeys:
-      #   - ca.crt
 # Enable or disable broker authentication and authorization.
 auth:
   authentication:
     enabled: false
+    provider: "jwt"
     jwt:
-      enabled: false
       # Enable JWT authentication
       # If the token is generated by a secret key, set the usingSecretKey as true.
       # If the token is generated by a private key, set the usingSecretKey as false.
       usingSecretKey: false
-    openid:
-      enabled: false
-      # # https://pulsar.apache.org/docs/next/security-openid-connect/#enable-openid-connect-authentication-in-the-broker-and-proxy
-      openIDAllowedTokenIssuers: []
-      openIDAllowedAudiences: []
-      openIDTokenIssuerTrustCertsFilePath:
-      openIDRoleClaim:
-      openIDAcceptedTimeLeewaySeconds: "0"
-      openIDCacheSize: "5"
-      openIDCacheRefreshAfterWriteSeconds: "64800"
-      openIDCacheExpirationSeconds: "86400"
-      openIDHttpConnectionTimeoutMillis: "10000"
-      openIDHttpReadTimeoutMillis: "10000"
-      openIDKeyIdCacheMissRefreshSeconds: "300"
-      openIDRequireIssuersUseHttps: "true"
-      openIDFallbackDiscoveryMode: "DISABLED"
   authorization:
     enabled: false
   superUsers:
@@ -365,15 +295,13 @@ auth:
 ######################################################################
 ## cert-manager
-## templates/tls-cert-internal-issuer.yaml
+## templates/tls-cert-issuer.yaml
 ##
 ## Cert manager is used for automatically provisioning TLS certificates
 ## for components within a Pulsar cluster
 certs:
   internal_issuer:
     apiVersion: cert-manager.io/v1
-    # To enable internal issuer for TLS certificates, set this to true
-    # It is necessary to have cert-manager installed in the cluster
     enabled: false
     component: internal-cert-issuer
     # The type of issuer, supports selfsigning and ca
@@ -383,19 +311,10 @@ certs:
     # 15d
     renewBefore: 360h
   issuers:
-    # Used for certs.internal_issuer.type as selfsigning
+    # Used for certs.type as selfsigning, the selfsigned issuer has no dependency on any other resource.
     selfsigning:
-      # The name of the issuer, if not specified, the default value is used
-      name:
-      # The secret name of the selfsigned CA certificate, if not specified, the default value is used
-      secretName:
-    # used for certs.internal_issuer.type as ca or when internal_issuer is disabled
+    # used for certs.type as ca, the CA issuer needs to reference a Secret which contains your CA certificate and signing private key.
     ca:
-      # The name of the issuer, it is mandatory to specify this value if TLS is enabled
-      # and selfsigning is not used
-      name:
-      # The secret name of the CA certificate, it is mandatory to specify this value if TLS is enabled
-      # and selfsigning is not used
       secretName:
 ######################################################################
@@ -415,7 +334,7 @@ zookeeper:
   type: RollingUpdate
   podManagementPolicy: Parallel
   initContainers: []
-  # This is how Victoria Metrics or Prometheus discovers this component
+  # This is how prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -462,8 +381,6 @@ zookeeper:
     type: requiredDuringSchedulingIgnoredDuringExecution
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   gracePeriod: 30
@@ -578,11 +495,7 @@ oxia:
   replicationFactor: 3
   ## templates/coordinator-deployment.yaml
   coordinator:
-    # annotations for the app (statefulset/deployment)
-    appAnnotations: {}
-    # pods annotations
-    annotations: {}
-    # This is how Victoria Metrics or Prometheus discovers this component
+    # This is how prometheus discovers this component
    podMonitor:
      enabled: true
      interval: 60s
@@ -602,18 +515,9 @@ oxia:
     tolerations: []
     # nodeSelector:
     #   cloud.google.com/gke-nodepool: default-pool
-    extraContainers: []
-    extraVolumes: []
-    extraVolumeMounts: []
-    # customConfigMapName: ""
-    # entrypoint: []
   ## templates/server-statefulset.yaml
   server:
-    # annotations for the app (statefulset/deployment)
-    appAnnotations: {}
-    # pods annotations
-    annotations: {}
-    # This is how Victoria Metrics or Prometheus discovers this component
+    # This is how prometheus discovers this component
     podMonitor:
       enabled: true
       interval: 60s
@@ -686,7 +590,7 @@ bookkeeper:
   type: RollingUpdate
   podManagementPolicy: Parallel
   initContainers: []
-  # This is how Victoria Metrics or Prometheus discovers this component
+  # This is how prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -730,8 +634,6 @@ bookkeeper:
     type: requiredDuringSchedulingIgnoredDuringExecution
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   gracePeriod: 30
@@ -899,7 +801,7 @@ autorecovery:
   component: recovery
   replicaCount: 1
   initContainers: []
-  # This is how Victoria Metrics or Prometheus discovers this component
+  # This is how prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -922,8 +824,6 @@ autorecovery:
     type: requiredDuringSchedulingIgnoredDuringExecution
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   # tolerations: []
   gracePeriod: 30
@@ -933,10 +833,6 @@ autorecovery:
     requests:
       memory: 64Mi
       cpu: 0.05
-  ## Bookkeeper auto-recovery service
-  ## templates/autorecovery-service.yaml
-  service:
-    annotations: {}
   ## Bookkeeper auto-recovery service account
   ## templates/autorecovery-service-account.yaml
   service_account:
@@ -948,8 +844,6 @@ autorecovery:
     BOOKIE_MEM: >
       -Xms64m -Xmx64m
     PULSAR_PREFIX_useV2WireProtocol: "true"
-  extraVolumes: []
-  extraVolumeMounts: []
 ## Pulsar Zookeeper metadata. The metadata will be deployed as
 ## soon as the last zookeeper node is reachable. The deployment
@@ -982,52 +876,6 @@ pulsar_metadata:
   ## Timeout for running metadata initialization
   initTimeout: 60
-  ## Allow read-only operations on the metadata store when the metadata store is not available.
-  ## This is useful when you want to continue serving requests even if the metadata store is not fully available with quorum.
-  metadataStoreAllowReadOnlyOperations: false
-  ## The session timeout for the metadata store in milliseconds.
-  metadataStoreSessionTimeoutMillis: 30000
-  ## Metadata store operation timeout in seconds.
-  metadataStoreOperationTimeoutSeconds: 30
-  ## The expiry time for the metadata store cache in seconds.
-  metadataStoreCacheExpirySeconds: 300
-  ## Whether we should enable metadata operations batching
-  metadataStoreBatchingEnabled: true
-  ## Maximum delay to impose on batching grouping (in milliseconds)
-  metadataStoreBatchingMaxDelayMillis: 5
-  ## Maximum number of operations to include in a singular batch
-  metadataStoreBatchingMaxOperations: 1000
-  ## Maximum size of a batch (in KB)
-  metadataStoreBatchingMaxSizeKb: 128
-  ## BookKeeper client and BookKeeper metadata configuration settings with Pulsar Helm Chart deployments
-  bookkeeper:
-    ## Controls whether to use the PIP-45 metadata driver (PulsarMetadataClientDriver) for BookKeeper client
-    ## in the Pulsar Broker when using ZooKeeper as a metadata store.
-    ## This is setting applies to Pulsar Broker's BookKeeper client.
-    ## When set to true, Pulsar Broker's BookKeeper client will use the PIP-45 metadata driver (PulsarMetadataBookieDriver).
-    ## When set to false, Pulsar Broker's BookKeeper client will use BookKeeper's default ZooKeeper connection implementation.
-    usePulsarMetadataClientDriver: false
-    ## Controls whether to use the PIP-45 metadata driver (PulsarMetadataBookieDriver) for BookKeeper components
-    ## when using ZooKeeper as a metadata store.
-    ## This is a global setting that applies to all BookKeeper components.
-    ## When set to true, BookKeeper components will use the PIP-45 metadata driver (PulsarMetadataBookieDriver).
-    ## When set to false, BookKeeper components will use BookKeeper's default ZooKeeper connection implementation.
-    ## Warning: Do not enable this feature unless you are aware of the risks and have tested it in non-production environments.
-    usePulsarMetadataBookieDriver: false
-    ## The session timeout for the metadata store in milliseconds. This setting is mapped to `zkTimeout` in `bookkeeper.conf`.
-    ## due to implementation details in the PulsarMetadataBookieDriver, it also applies when Oxia metadata store is enabled.
-    metadataStoreSessionTimeoutMillis: 30000
   # resources for bin/pulsar initialize-cluster-metadata
   resources:
     # requests:
@@ -1068,21 +916,11 @@ broker:
   # The podManagementPolicy cannot be modified for an existing deployment. If you need to change this value, you will need to manually delete the existing broker StatefulSet and then redeploy the chart.
   podManagementPolicy:
   initContainers: []
-  # This is how Victoria Metrics or Prometheus discovers this component
+  # This is how prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
     scrapeTimeout: 60s
-    # Removes metrics that end with _created suffix
-    # These metrics are automatically generated by the Prometheus client library to comply with OpenMetrics format
-    # and aren't currently used. Disable this if you need to use these metrics or add an exclusion pattern when
-    # a specific metric is needed.
-    dropUnderscoreCreatedMetrics:
-      enabled: true
-      # Optional regex pattern to exclude specific metrics from being dropped
-      # excludePatterns:
-      #   - pulsar_topic_load_times_created
-    # Custom metric relabelings to apply to all metrics
     metricRelabelings:
     # - action: labeldrop
     #   regex: cluster
@@ -1123,8 +961,6 @@ broker:
     type: preferredDuringSchedulingIgnoredDuringExecution
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   gracePeriod: 30
@@ -1180,9 +1016,9 @@ broker:
     -XX:-ResizePLAB
     -XX:+ExitOnOutOfMemoryError
     -XX:+PerfDisableSharedMem
-  managedLedgerDefaultEnsembleSize: "2"
-  managedLedgerDefaultWriteQuorum: "2"
-  managedLedgerDefaultAckQuorum: "2"
+  managedLedgerDefaultEnsembleSize: "1"
+  managedLedgerDefaultWriteQuorum: "1"
+  managedLedgerDefaultAckQuorum: "1"
 ## Add a custom command to the start up process of the broker pods (e.g. update-ca-certificates, jvm commands, etc)
 additionalCommand:
@@ -1327,21 +1163,11 @@ proxy:
   metrics: ~
   behavior: ~
   initContainers: []
-  # This is how Victoria Metrics or Prometheus discovers this component
+  # This is how prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
     scrapeTimeout: 60s
-    # Removes metrics that end with _created suffix
-    # These metrics are automatically generated by the Prometheus client library to comply with OpenMetrics format
-    # and aren't currently used. Disable this if you need to use these metrics or add an exclusion pattern when
-    # a specific metric is needed.
-    dropUnderscoreCreatedMetrics:
-      enabled: true
-      # Optional regex pattern to exclude specific metrics from being dropped
-      # excludePatterns:
-      #   - pulsar_proxy_new_connections_created
-    # Custom metric relabelings to apply to all metrics
     metricRelabelings:
     # - action: labeldrop
     #   regex: cluster
@@ -1377,8 +1203,6 @@ proxy:
     type: requiredDuringSchedulingIgnoredDuringExecution
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   gracePeriod: 30
@@ -1451,48 +1275,8 @@ proxy:
     http: 8080
     https: 8443
   service:
-    # Service type defaults to ClusterIP for security reasons.
-    # SECURITY NOTICE: The Pulsar proxy is not designed for direct public internet exposure
-    # (see https://pulsar.apache.org/docs/4.0.x/administration-proxy/).
-    #
-    # If you need to expose the proxy outside of the cluster using a LoadBalancer service type:
-    # 1. Set type to LoadBalancer only in secured environments with proper network controls.
-    #    In cloud managed Kubernetes clusters, make sure to add annotations to the service to create an
-    #    internal load balancer so that the load balancer is not exposed to the public internet.
-    #    You must also ensure that the configuration is correct so that the load balancer is not exposed to the public internet.
-    # 2. Configure authentication and authorization
-    # 3. Use TLS for all connections
-    # 4. If you are exposing to insecure networks, implement additional security measures like
-    #    IP restrictions (loadBalancerSourceRanges)
-    #
-    # Please notice that the Apache Pulsar project takes no responsibility for any security issues
-    # for your deployment. Exposing the cluster using Pulsar Proxy to insecure networks is not supported.
-    #
-    # Previous chart versions defaulted to LoadBalancer which could create security risks.
-    type: ClusterIP
-    # When using a LoadBalancer service type, add internal load balancer annotations to the service to create an internal load balancer.
-    annotations: {
-      ## Set internal load balancer annotations when using a LoadBalancer service type because of security reasons.
-      ## You must also ensure that the configuration is correct so that the load balancer is not exposed to the public internet.
-      ## This information below is for reference only and may not be applicable to your cloud provider.
-      ## Please refer to the cloud provider's documentation for the correct annotations.
-      ## Kubernetes documentation about internal load balancers
-      ## https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-      ## AWS / EKS
-      ## Ensure that you have recent AWS Load Balancer Controller installed.
-      ## Docs: https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/
-      # service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
-      ## Azure / AKS
-      ## Docs: https://learn.microsoft.com/en-us/azure/aks/internal-lb
-      # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
-      ## GCP / GKE
-      ## Docs: https://cloud.google.com/kubernetes-engine/docs/concepts/service-load-balancer-parameters
-      # networking.gke.io/load-balancer-type: "Internal"
-      ## Allow global access to the internal load balancer when needed.
-      # networking.gke.io/internal-load-balancer-allow-global-access: "true"
-    }
+    annotations: {}
+    type: LoadBalancer
     ## Optional. Leave it blank to get next available random IP.
     loadBalancerIP: ""
     ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it.
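A hedged example of the only exposure pattern the notice above endorses: a LoadBalancer that stays internal. The GKE annotation is taken verbatim from the comments in the hunk; the release name and the `apache` repo alias are placeholders.

```
# Sketch only: internal load balancer on GKE, per the annotations documented above.
cat > proxy-internal-lb.yaml <<'EOF'
proxy:
  service:
    type: LoadBalancer
    annotations:
      networking.gke.io/load-balancer-type: "Internal"
EOF
helm upgrade --install pulsar apache/pulsar -f proxy-internal-lb.yaml
```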
@@ -1547,8 +1331,6 @@ toolset:
   #   cloud.google.com/gke-nodepool: default-pool
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   gracePeriod: 30
@ -1585,239 +1367,92 @@ toolset:
additionalCommand: additionalCommand:
############################################################# #############################################################
### Monitoring Stack : victoria-metrics-k8s-stack chart ### Monitoring Stack : kube-prometheus-stack chart
############################################################# #############################################################
## Victoria Metrics, Grafana, and the rest of the monitoring stack are managed by the dependent chart here: ## Prometheus, Grafana, and the rest of the kube-prometheus-stack are managed by the dependent chart here:
## https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack ## https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
## For sample values, please see: https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/values.yaml ## For sample values, please see their documentation.
victoria-metrics-k8s-stack: kube-prometheus-stack:
## Enable the victoria-metrics-k8s-stack chart
enabled: true enabled: true
prometheus:
## VictoriaMetrics Operator dependency chart configuration
victoria-metrics-operator:
enabled: true enabled: true
# Install CRDs for VictoriaMetrics Operator
crds:
plain: true
operator:
## By default, operator is configured to not convert Prometheus Operator monitoring.coreos.com/v1 objects
## to Victoria Metrics operator operator.victoriametrics.com/v1beta1 objects.
# Enable this if you want to use Prometheus Operator objects for other purposes.
disable_prometheus_converter: true
## Single-node VM instance
vmsingle:
enabled: true
## -- Full spec for VMSingle CRD. Allowed values describe [here](https://docs.victoriametrics.com/operator/api#vmsinglespec)
spec:
retentionPeriod: "10d"
storage:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
## VM Agent for scraping metrics
vmagent:
enabled: true
## Minikube specific settings - uncomment when using minikube
# spec:
# volumes:
# - hostPath:
# path: /var/lib/minikube/certs/etcd
# type: DirectoryOrCreate
# name: etcd-certs
# volumeMounts:
# - mountPath: /var/lib/minikube/certs/etcd
# name: etcd-certs
## VM Alert for alerting rules - disabled by default
vmalert:
enabled: false
## Alertmanager component - disabled by default
alertmanager:
enabled: false
## Grafana component
## Refer to https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
grafana: grafana:
enabled: true enabled: true
# Use random password at installation time for Grafana by default by setting empty value to `adminPassword`. # Use random password at installation time for Grafana by default by setting empty value to `adminPassword`.
# You can find out the actual password by running the following command: # You can find out the actual password by running the following command:
# kubectl get secret -l app.kubernetes.io/name=grafana -o=jsonpath="{.items[0].data.admin-password}" | base64 --decode # kubectl get secret -l app.kubernetes.io/name=grafana -o=jsonpath="{.items[0].data.admin-password}" | base64 --decode
adminPassword: adminPassword:
persistence: # Configure Pulsar dashboards for Grafana
enabled: true
size: 5Gi
## Disable Grafana sidecar dashboards
## since this cannot be enabled in the same time as dashboards are enabled
sidecar:
dashboards:
enabled: false
# grafana.ini settings
grafana.ini:
analytics:
check_for_updates: false
dashboards:
default_home_dashboard_path: /var/lib/grafana/dashboards/pulsar/overview.json
## Configure Pulsar dashboards for Grafana
dashboardProviders: dashboardProviders:
dashboardproviders.yaml: dashboardproviders.yaml:
apiVersion: 1 apiVersion: 1
providers: providers:
- name: 'default' - name: 'pulsar'
orgId: 1 orgId: 1
folder: '' folder: 'Pulsar'
type: file type: file
disableDeletion: true disableDeletion: true
editable: true editable: true
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards/default
- name: oxia
orgId: 1
folder: Oxia
type: file
disableDeletion: true
editable: true
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards/oxia
- name: pulsar
orgId: 1
folder: Pulsar
type: file
disableDeletion: true
editable: true
allowUiUpdates: true
options: options:
path: /var/lib/grafana/dashboards/pulsar path: /var/lib/grafana/dashboards/pulsar
dashboards: dashboards:
default:
victoriametrics:
gnetId: 10229
revision: 38
datasource: VictoriaMetrics
kubernetes:
gnetId: 14205
datasource: VictoriaMetrics
oxia:
oxia-containers:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-containers.json
oxia-coordinator:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-coordinator.json
oxia-golang:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-golang.json
oxia-grpc:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-grpc.json
oxia-nodes:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-nodes.json
oxia-overview:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-overview.json
oxia-shards:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-shards.json
pulsar: pulsar:
bookkeeper-compaction: # Download the maintained dashboards from AL 2.0 licenced repo https://github.com/streamnative/apache-pulsar-grafana-dashboard
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper-compaction.json
bookkeeper: bookkeeper:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/bookkeeper.json
broker-cache-by-broker: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/broker-cache-by-broker.json broker:
broker-cache: url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/broker.json
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/broker-cache.json datasource: Prometheus
connector-sink: connector_sink:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/connector-sink.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_sink.json
connector-source: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/connector-source.json connector_source:
url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_source.json
datasource: Prometheus
container:
url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/container.json
datasource: Prometheus
functions: functions:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/functions.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/functions.json
datasource: Prometheus
jvm: jvm:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/jvm.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/jvm.json
load-balancing: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/load-balancing.json loadbalance:
url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/loadbalance.json
datasource: Prometheus
messaging: messaging:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/messaging.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/messaging.json
namespace: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/namespace.json
node: node:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/node.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/node.json
offloader: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/offloader.json
overview-by-broker:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/overview-by-broker.json
overview: overview:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/overview.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/overview.json
datasource: Prometheus
proxy: proxy:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/proxy.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/proxy.json
sockets: datasource: Prometheus
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/sockets.json recovery:
url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/recovery.json
datasource: Prometheus
topic: topic:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/topic.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/topic.json
datasource: Prometheus
transaction:
url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/transaction.json
datasource: Prometheus
zookeeper: zookeeper:
url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/zookeeper.json url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/zookeeper-3.6.json
## Node exporter component datasource: Prometheus
prometheus-node-exporter: prometheus-node-exporter:
enabled: true enabled: true
hostRootFsMount: hostRootFsMount:
enabled: false enabled: false
alertmanager:
## Kube state metrics component enabled: false
kube-state-metrics:
enabled: true
## Components scraping Kubernetes services
kubelet:
enabled: true
kubeApiServer:
enabled: true
kubeControllerManager:
enabled: true
## Additional settings for minikube environments
vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecureSkipVerify: true # For development environments like minikube
coreDns:
enabled: true
kubeEtcd:
enabled: true
## Minikube specific settings - uncomment or adjust when using minikube
# service:
# port: 2381
# targetPort: 2381
# vmScrape:
# spec:
# endpoints:
# - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
# port: http-metrics
# scheme: http # Minikube often uses http instead of https for etcd
kubeScheduler:
enabled: true
## Additional settings for minikube environments
vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecureSkipVerify: true # For development environments like minikube
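The Grafana password comment appears unchanged on both sides of this hunk; expanded into a copy-paste form, with the `pulsar` namespace as an assumption (use whatever namespace the release runs in):

```
kubectl get secret -n pulsar -l app.kubernetes.io/name=grafana \
  -o=jsonpath="{.items[0].data.admin-password}" | base64 --decode; echo
```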
 ## Components Stack: pulsar_manager
 ## templates/pulsar-manager.yaml
@@ -1832,8 +1467,6 @@ pulsar_manager:
   #   cloud.google.com/gke-nodepool: default-pool
   # set topologySpreadConstraint to deploy pods across different zones
   topologySpreadConstraints: []
-  # annotations for the app (statefulset/deployment)
-  appAnnotations: {}
   annotations: {}
   tolerations: []
   extraVolumes: []
@@ -1939,7 +1572,3 @@ initContainer:
     requests:
       memory: 256Mi
       cpu: 0.1
-
-## Array of extra objects to deploy with the release (evaluated as a template)
-##
-extraDeploy: []
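The removed `extraDeploy` value accepted arbitrary extra manifests, each rendered as a template before being deployed with the release. A hypothetical entry, only to show the shape such lists usually take in charts that support them; the ConfigMap is illustrative, not part of the chart:

```
# Hypothetical values snippet for the master branch.
extraDeploy:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      name: "{{ .Release.Name }}-extra"
    data:
      note: "shipped alongside the release"
```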

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-victoria-metrics-k8s-stack:
+kube-prometheus-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-victoria-metrics-k8s-stack:
+kube-prometheus-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -1,58 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# disable monitoring
victoria-metrics-k8s-stack:
  enabled: false
  victoria-metrics-operator:
    enabled: false
  vmsingle:
    enabled: false
  vmagent:
    enabled: false
  kube-state-metrics:
    enabled: false
  prometheus-node-exporter:
    enabled: false
  grafana:
    enabled: false
# disable pod monitors
autorecovery:
  podMonitor:
    enabled: false
bookkeeper:
  podMonitor:
    enabled: false
oxia:
  server:
    podMonitor:
      enabled: false
  coordinator:
    podMonitor:
      enabled: false
broker:
  podMonitor:
    enabled: false
proxy:
  podMonitor:
    enabled: false
zookeeper:
  podMonitor:
    enabled: false

View File

@@ -28,7 +28,7 @@ components:
   pulsar_manager: true

 ## disable monitoring stack
-victoria-metrics-k8s-stack:
+kube-prometheus-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -1,46 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# disabled AntiAffinity
affinity:
  anti_affinity: false
victoria-metrics-k8s-stack:
  grafana:
    adminPassword: verysecureword123
bookkeeper:
  configData:
    # more aggressive disk cleanup
    journalMaxSizeMB: "256"
    majorCompactionInterval: "600"
    minorCompactionInterval: "300"
    compactionRateByEntries: "5000"
    gcWaitTime: "60000"
broker:
  configData:
    # more aggressive disk cleanup
    managedLedgerMinLedgerRolloverTimeMinutes: "1"
    managedLedgerMaxLedgerRolloverTimeMinutes: "5"
    # configure deletion of inactive topics
    brokerDeleteInactiveTopicsMaxInactiveDurationSeconds: "86400"
proxy:
  replicaCount: 1
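The same `configData` overrides can also be applied ad hoc, without a values file. A sketch using `--set`; the release name and the `apache` repo alias are placeholders:

```
helm upgrade --install pulsar apache/pulsar \
  --set broker.configData.managedLedgerMinLedgerRolloverTimeMinutes="1" \
  --set broker.configData.brokerDeleteInactiveTopicsMaxInactiveDurationSeconds="86400"
```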

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-victoria-metrics-k8s-stack:
+kube-prometheus-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -25,14 +25,14 @@ fi
 OUTPUT=${PULSAR_CHART_HOME}/output
 OUTPUT_BIN=${OUTPUT}/bin
-: "${KUBECTL_VERSION:=1.28.15}"
+: "${KUBECTL_VERSION:=1.23.17}"
 KUBECTL_BIN=$OUTPUT_BIN/kubectl
 HELM_BIN=$OUTPUT_BIN/helm
-: "${HELM_VERSION:=3.16.4}"
-: "${KIND_VERSION:=0.27.0}"
+: "${HELM_VERSION:=3.14.4}"
+: "${KIND_VERSION:=0.22.0}"
 KIND_BIN=$OUTPUT_BIN/kind
 CR_BIN=$OUTPUT_BIN/cr
-: "${CR_VERSION:=1.7.0}"
+: "${CR_VERSION:=1.6.0}"
 KUBECONFORM_BIN=$OUTPUT_BIN/kubeconform
 : "${KUBECONFORM_VERSION:=0.6.7}"
 export PATH="$OUTPUT_BIN:$PATH"
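Every pin above uses the `: "${VAR:=default}"` idiom, so each tool version can be overridden from the environment without editing the script. A minimal self-contained demonstration of the idiom:

```
# The default only applies when the variable is unset or empty.
unset KUBECTL_VERSION
: "${KUBECTL_VERSION:=1.28.15}"
echo "$KUBECTL_VERSION"    # prints 1.28.15

KUBECTL_VERSION=1.30.0
: "${KUBECTL_VERSION:=1.28.15}"
echo "$KUBECTL_VERSION"    # prints 1.30.0
```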

View File

@@ -25,7 +25,7 @@ set -e
 NAMESPACE=cert-manager
 NAME=cert-manager
 # check compatibility with k8s versions from https://cert-manager.io/docs/installation/supported-releases/
-VERSION=v1.12.17
+VERSION=v1.12.13

 # Install cert-manager CustomResourceDefinition resources
 echo "Installing cert-manager CRD resources ..."
@@ -41,12 +41,10 @@ echo "Updating local helm chart repository cache ..."
 helm repo update

 echo "Installing cert-manager ${VERSION} to namespace ${NAMESPACE} as '${NAME}' ..."
-helm upgrade \
-  --install \
+helm install \
   --namespace ${NAMESPACE} \
   --create-namespace \
   --version ${VERSION} \
-  --set featureGates=AdditionalCertificateOutputFormats=true \
   ${NAME} \
   jetstack/cert-manager

 echo "Successfully installed cert-manager ${VERSION}."

View File

@@ -21,7 +21,7 @@
 # This script is used to upgrade the Prometheus Operator CRDs before running "helm upgrade"
 # source: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#upgrading-an-existing-release-to-a-new-major-version
 # "Run these commands to update the CRDs before applying the upgrade."
-PROMETHEUS_OPERATOR_VERSION="${1:-"0.80.0"}"
+PROMETHEUS_OPERATOR_VERSION="${1:-"0.77.1"}"
 PREFIX_URL="https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v${PROMETHEUS_OPERATOR_VERSION}/example/prometheus-operator-crd"
 for crd in alertmanagerconfigs alertmanagers podmonitors probes prometheusagents prometheuses prometheusrules scrapeconfigs servicemonitors thanosrulers; do
 # "--force-conflicts" is required to upgrade the CRDs. Following instructions from https://github.com/prometheus-community/helm-charts/issues/2489

View File

@@ -18,13 +18,34 @@
 # under the License.
 #

-if [ -z "$PULSAR_VERSION" ]; then
-  if command -v yq &> /dev/null; then
-    # use yq to get the appVersion from the Chart.yaml file
-    PULSAR_VERSION=$(yq .appVersion charts/pulsar/Chart.yaml)
-  else
-    # use a default version if yq is not installed
-    PULSAR_VERSION="4.0.3"
-  fi
+if [ -z "$CHART_HOME" ]; then
+  echo "error: CHART_HOME should be initialized"
+  exit 1
 fi
-PULSAR_TOKENS_CONTAINER_IMAGE="apachepulsar/pulsar:${PULSAR_VERSION}"
+
+OUTPUT=${CHART_HOME}/output
+OUTPUT_BIN=${OUTPUT}/bin
+PULSARCTL_VERSION=v3.0.2.6
+PULSARCTL_BIN=${HOME}/.pulsarctl/pulsarctl
+export PATH=${HOME}/.pulsarctl/plugins:${PATH}
+
+test -d "$OUTPUT_BIN" || mkdir -p "$OUTPUT_BIN"
+
+function pulsar::verify_pulsarctl() {
+  if test -x "$PULSARCTL_BIN"; then
+    return
+  fi
+  return 1
+}
+
+function pulsar::ensure_pulsarctl() {
+  if pulsar::verify_pulsarctl; then
+    return 0
+  fi
+  echo "Get pulsarctl install.sh script ..."
+  install_script=$(mktemp)
+  trap "test -f $install_script && rm $install_script" RETURN
+  curl --retry 10 -L -o $install_script https://raw.githubusercontent.com/streamnative/pulsarctl/master/install.sh
+  chmod +x $install_script
+  $install_script --user --version ${PULSARCTL_VERSION}
+}

View File

@@ -20,12 +20,9 @@
 set -e

-SCRIPT_DIR="$(unset CDPATH && cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
-CHART_HOME=$(unset CDPATH && cd "$SCRIPT_DIR/../.." && pwd)
+CHART_HOME=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/../.. && pwd)
 cd ${CHART_HOME}

-source "${SCRIPT_DIR}/common_auth.sh"
-
 usage() {
   cat <<EOF
 This script is used to generate token for a given pulsar role.

@@ -89,6 +86,10 @@ if [[ "x${role}" == "x" ]]; then
   exit 1
 fi

+source ${CHART_HOME}/scripts/pulsar/common_auth.sh
+
+pulsar::ensure_pulsarctl
+
 namespace=${namespace:-pulsar}
 release=${release:-pulsar-dev}

@@ -100,6 +101,7 @@ function pulsar::jwt::get_secret() {
   if [[ "${local}" == "true" ]]; then
     cp ${type} ${tmpfile}
   else
+    echo "kubectl get -n ${namespace} secrets ${secret_name} -o jsonpath="{.data.${type}}" | base64 --decode > ${tmpfile}"
     kubectl get -n ${namespace} secrets ${secret_name} -o jsonpath="{.data['${type}']}" | base64 --decode > ${tmpfile}
   fi
 }

@@ -108,41 +110,31 @@ function pulsar::jwt::generate_symmetric_token() {
   local token_name="${release}-token-${role}"
   local secret_name="${release}-token-symmetric-key"

-  local tmpdir=$(mktemp -d)
-  trap "test -d $tmpdir && rm -rf $tmpdir" RETURN
-  secretkeytmpfile=${tmpdir}/secret.key
-  tokentmpfile=${tmpdir}/token.jwt
-
-  pulsar::jwt::get_secret SECRETKEY ${secretkeytmpfile} ${secret_name}
-  docker run --user 0 --rm -t -v ${tmpdir}:/keydir ${PULSAR_TOKENS_CONTAINER_IMAGE} bin/pulsar tokens create -a HS256 --subject "${role}" --secret-key=file:/keydir/secret.key > ${tokentmpfile}
-  newtokentmpfile=${tmpdir}/token.jwt.new
+  tmpfile=$(mktemp)
+  trap "test -f $tmpfile && rm $tmpfile" RETURN
+  tokentmpfile=$(mktemp)
+  trap "test -f $tokentmpfile && rm $tokentmpfile" RETURN
+  pulsar::jwt::get_secret SECRETKEY ${tmpfile} ${secret_name}
+  ${PULSARCTL_BIN} token create -a HS256 --secret-key-file ${tmpfile} --subject ${role} 2&> ${tokentmpfile}
+  newtokentmpfile=$(mktemp)
   tr -d '\n' < ${tokentmpfile} > ${newtokentmpfile}
+  echo "kubectl create secret generic ${token_name} -n ${namespace} --from-file="TOKEN=${newtokentmpfile}" --from-literal="TYPE=symmetric" ${local:+ -o yaml --dry-run=client}"
   kubectl create secret generic ${token_name} -n ${namespace} --from-file="TOKEN=${newtokentmpfile}" --from-literal="TYPE=symmetric" ${local:+ -o yaml --dry-run=client}
-  rm -rf $tmpdir
 }

 function pulsar::jwt::generate_asymmetric_token() {
   local token_name="${release}-token-${role}"
   local secret_name="${release}-token-asymmetric-key"

-  local tmpdir=$(mktemp -d)
-  trap "test -d $tmpdir && rm -rf $tmpdir" RETURN
-  privatekeytmpfile=${tmpdir}/privatekey.der
-  tokentmpfile=${tmpdir}/token.jwt
+  privatekeytmpfile=$(mktemp)
+  trap "test -f $privatekeytmpfile && rm $privatekeytmpfile" RETURN
+  tokentmpfile=$(mktemp)
+  trap "test -f $tokentmpfile && rm $tokentmpfile" RETURN

   pulsar::jwt::get_secret PRIVATEKEY ${privatekeytmpfile} ${secret_name}
-  # Generate token
-  docker run --user 0 --rm -t -v ${tmpdir}:/keydir ${PULSAR_TOKENS_CONTAINER_IMAGE} bin/pulsar tokens create -a RS256 --subject "${role}" --private-key=file:/keydir/privatekey.der > ${tokentmpfile}
-  newtokentmpfile=${tmpdir}/token.jwt.new
+  ${PULSARCTL_BIN} token create -a RS256 --private-key-file ${privatekeytmpfile} --subject ${role} 2&> ${tokentmpfile}
+  newtokentmpfile=$(mktemp)
   tr -d '\n' < ${tokentmpfile} > ${newtokentmpfile}
   kubectl create secret generic ${token_name} -n ${namespace} --from-file="TOKEN=${newtokentmpfile}" --from-literal="TYPE=asymmetric" ${local:+ -o yaml --dry-run=client}
-  rm -rf $tmpdir
 }

 if [[ "${symmetric}" == "true" ]]; then
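Whichever branch generated it, the token ends up in a secret named `<release>-token-<role>`. With this script's defaults (`pulsar` namespace, `pulsar-dev` release) it can be read back as follows, where `myrole` is a placeholder role name:

```
kubectl get secret -n pulsar pulsar-dev-token-myrole \
  -o jsonpath='{.data.TOKEN}' | base64 --decode; echo
```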

View File

@@ -20,12 +20,9 @@
 set -e

-SCRIPT_DIR="$(unset CDPATH && cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
-CHART_HOME=$(unset CDPATH && cd "$SCRIPT_DIR/../.." && pwd)
+CHART_HOME=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/../.. && pwd)
 cd ${CHART_HOME}

-source "${SCRIPT_DIR}/common_auth.sh"
-
 usage() {
   cat <<EOF
 This script is used to generate token secret key for a given pulsar helm release.

@@ -77,6 +74,10 @@ case $key in
 esac
 done

+source ${CHART_HOME}/scripts/pulsar/common_auth.sh
+
+pulsar::ensure_pulsarctl
+
 namespace=${namespace:-pulsar}
 release=${release:-pulsar-dev}
 local_cmd=${file:+-o yaml --dry-run=client >secret.yaml}

@@ -84,38 +85,31 @@ local_cmd=${file:+-o yaml --dry-run=client >secret.yaml}
 function pulsar::jwt::generate_symmetric_key() {
   local secret_name="${release}-token-symmetric-key"

-  local tmpdir=$(mktemp -d)
-  trap "test -d $tmpdir && rm -rf $tmpdir" RETURN
-  local tmpfile=${tmpdir}/SECRETKEY
-  docker run --rm -t ${PULSAR_TOKENS_CONTAINER_IMAGE} bin/pulsar tokens create-secret-key > "${tmpfile}"
-  kubectl create secret generic ${secret_name} -n ${namespace} --from-file=$tmpfile ${local:+ -o yaml --dry-run=client}
-  # if local is true, keep the file available for debugging purposes
-  if [[ "${local}" == "true" ]]; then
-    mv $tmpfile SECRETKEY
+  tmpfile=$(mktemp)
+  trap "test -f $tmpfile && rm $tmpfile" RETURN
+  ${PULSARCTL_BIN} token create-secret-key --output-file ${tmpfile}
+  mv $tmpfile SECRETKEY
+  kubectl create secret generic ${secret_name} -n ${namespace} --from-file=SECRETKEY ${local:+ -o yaml --dry-run=client}
+  if [[ "${local}" != "true" ]]; then
+    rm SECRETKEY
   fi
-  rm -rf $tmpdir
 }

 function pulsar::jwt::generate_asymmetric_key() {
   local secret_name="${release}-token-asymmetric-key"

-  local tmpdir=$(mktemp -d)
-  trap "test -d $tmpdir && rm -rf $tmpdir" RETURN
-  privatekeytmpfile=${tmpdir}/PRIVATEKEY
-  publickeytmpfile=${tmpdir}/PUBLICKEY
-
-  # Generate key pair
-  docker run --user 0 --rm -t -v ${tmpdir}:/keydir ${PULSAR_TOKENS_CONTAINER_IMAGE} bin/pulsar tokens create-key-pair --output-private-key=/keydir/PRIVATEKEY --output-public-key=/keydir/PUBLICKEY
-  kubectl create secret generic ${secret_name} -n ${namespace} --from-file=$privatekeytmpfile --from-file=$publickeytmpfile ${local:+ -o yaml --dry-run=client}
-  # if local is true, keep the files available for debugging purposes
-  if [[ "${local}" == "true" ]]; then
-    mv $privatekeytmpfile PRIVATEKEY
-    mv $publickeytmpfile PUBLICKEY
+  privatekeytmpfile=$(mktemp)
+  trap "test -f $privatekeytmpfile && rm $privatekeytmpfile" RETURN
+  publickeytmpfile=$(mktemp)
+  trap "test -f $publickeytmpfile && rm $publickeytmpfile" RETURN
+  ${PULSARCTL_BIN} token create-key-pair -a RS256 --output-private-key ${privatekeytmpfile} --output-public-key ${publickeytmpfile}
+  mv $privatekeytmpfile PRIVATEKEY
+  mv $publickeytmpfile PUBLICKEY
+  kubectl create secret generic ${secret_name} -n ${namespace} --from-file=PRIVATEKEY --from-file=PUBLICKEY ${local:+ -o yaml --dry-run=client}
+  if [[ "${local}" != "true" ]]; then
+    rm PRIVATEKEY
+    rm PUBLICKEY
   fi
-  rm -rf $tmpdir
 }

 if [[ "${symmetric}" == "true" ]]; then
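A quick check that the key secrets landed, using the defaults from this script (`pulsar` namespace, `pulsar-dev` release); `jq` is assumed to be available:

```
kubectl get secret -n pulsar pulsar-dev-token-symmetric-key -o jsonpath='{.data}' | jq 'keys'
kubectl get secret -n pulsar pulsar-dev-token-asymmetric-key -o jsonpath='{.data}' | jq 'keys'
```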

View File

@@ -74,6 +74,10 @@ if [[ "x${role}" == "x" ]]; then
   exit 1
 fi

+source ${CHART_HOME}/scripts/pulsar/common_auth.sh
+
+pulsar::ensure_pulsarctl
+
 namespace=${namespace:-pulsar}
 release=${release:-pulsar-dev}

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script is used to upgrade the Victoria Metrics Operator CRDs before running "helm upgrade"
VM_OPERATOR_VERSION="${1:-"0.42.4"}"
kubectl apply --server-side --force-conflicts -f "https://github.com/VictoriaMetrics/operator/releases/download/v${VM_OPERATOR_VERSION}/crd.yaml"