Migrate from kube-prometheus-stack to victoria-metrics-k8s-stack (#592)

Lari Hotari 2025-03-09 02:36:41 +02:00 committed by GitHub
parent 302db43e91
commit e6f05809bd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 405 additions and 450 deletions

View File

@@ -60,7 +60,11 @@ if [[ "$UPGRADE_FROM_VERSION" != "" ]]; then
   ALLOW_LOADBALANCERS="true"
   # install older version of pulsar chart
   PULSAR_CHART_VERSION="$UPGRADE_FROM_VERSION"
-  ci::install_pulsar_chart install ${PULSAR_HOME}/.ci/values-common.yaml ${PULSAR_HOME}/${VALUES_FILE} "${extra_opts[@]}"
+  # Install Prometheus Operator CRDs using the upgrade script since kube-prometheus-stack is now disabled before the upgrade
+  ${PULSAR_HOME}/scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh
+  ci::install_pulsar_chart install ${PULSAR_HOME}/.ci/values-common.yaml ${PULSAR_HOME}/${VALUES_FILE} --set kube-prometheus-stack.enabled=false "${extra_opts[@]}"
   install_type="upgrade"
   echo "Wait 10 seconds"
   sleep 10
@@ -72,9 +76,9 @@ if [[ "$UPGRADE_FROM_VERSION" != "" ]]; then
   ci::test_pulsar_producer_consumer "produce"
   test_action="consume"
-  if [[ "$(ci::helm_values_for_deployment | yq .kube-prometheus-stack.enabled)" == "true" ]]; then
-    echo "Upgrade Prometheus Operator CRDs before upgrading the deployment"
-    ${PULSAR_HOME}/scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh
+  if [[ "$(ci::helm_values_for_deployment | yq .victoria-metrics-k8s-stack.enabled)" == "true" ]]; then
+    echo "Upgrade Victoria Metrics Operator CRDs before upgrading the deployment"
+    ${PULSAR_HOME}/scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
   fi
 fi
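Editor's note: the CI gate above keys off the effective Helm values of the deployed release. A minimal standalone sketch of the same check (release and namespace names are illustrative; assumes yq v4 on the PATH):

```bash
#!/usr/bin/env bash
# Sketch of the gating logic: upgrade the operator CRDs only when the
# monitoring stack is enabled in the release's effective values.
enabled="$(helm get values -n pulsar pulsar --all -o yaml | yq .victoria-metrics-k8s-stack.enabled)"
if [[ "$enabled" == "true" ]]; then
  ./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
fi
```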

View File

@@ -17,17 +17,17 @@
 # under the License.
 #
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: true
-  prometheusOperator:
+  victoria-metrics-operator:
     enabled: true
-  prometheus:
+  vmsingle:
+    enabled: true
+  vmagent:
     enabled: true
   grafana:
     enabled: true
     adminPassword: pulsar-ci-admin
-  alertmanager:
-    enabled: false
   prometheus-node-exporter:
     enabled: true

View File

@@ -84,6 +84,7 @@ function ci::install_cert_manager() {
 function ci::helm_repo_add() {
   echo "Adding the helm repo ..."
   ${HELM} repo add prometheus-community https://prometheus-community.github.io/helm-charts
+  ${HELM} repo add vm https://victoriametrics.github.io/helm-charts/
   ${HELM} repo update
   echo "Successfully added the helm repo."
 }
@@ -117,7 +118,7 @@ function ci::install_pulsar_chart() {
   local extra_opts=()
   local values_next=false
   for arg in "$@"; do
-    if [[ "$arg" == "--values" ]]; then
+    if [[ "$arg" == "--values" || "$arg" == "--set" ]]; then
       extra_values+=("$arg")
       values_next=true
     elif [[ "$values_next" == true ]]; then
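Editor's note: the parser change above forwards `--set` pairs the same way `--values` pairs were already forwarded. A self-contained sketch of the pattern (the trailing helm invocation is illustrative):

```bash
#!/usr/bin/env bash
# Collect "--values <file>" and "--set <key=value>" argument pairs into
# extra_values; everything else goes to extra_opts.
extra_values=()
extra_opts=()
values_next=false
for arg in "$@"; do
  if [[ "$arg" == "--values" || "$arg" == "--set" ]]; then
    extra_values+=("$arg")
    values_next=true
  elif [[ "$values_next" == true ]]; then
    extra_values+=("$arg")
    values_next=false
  else
    extra_opts+=("$arg")
  fi
done
helm upgrade --install pulsar apachepulsar/pulsar "${extra_values[@]}" "${extra_opts[@]}"
```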

View File

@@ -25,7 +25,7 @@ helmCharts:
 - name: pulsar
   releaseName: pulsar
   valuesInline:
-    kube-prometheus-stack:
+    victoria-metrics-k8s-stack:
       enabled: false
     components:
       pulsar_manager: true
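Editor's note: a kustomization like the one above can be rendered locally; `--enable-helm` tells kustomize to expand the `helmCharts` field by invoking helm. A sketch, run from the directory containing the kustomization:

```bash
# Render the manifests without applying them; requires kustomize >= 4.1
# (or use `kubectl kustomize --enable-helm` with a recent kubectl).
kustomize build --enable-helm . | kubectl apply --dry-run=client -f -
```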

View File

@@ -17,15 +17,35 @@
 # under the License.
 #
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
-  prometheusOperator:
+  victoria-metrics-operator:
     enabled: false
-  grafana:
+  vmsingle:
+    enabled: false
+  vmagent:
+    enabled: false
+  vmalert:
     enabled: false
   alertmanager:
     enabled: false
-  prometheus:
+  grafana:
     enabled: false
+  prometheus-node-exporter:
+    enabled: false
+  kube-state-metrics:
+    enabled: false
+  kubelet:
+    enabled: false
+  kubeApiServer:
+    enabled: false
+  kubeControllerManager:
+    enabled: false
+  coreDns:
+    enabled: false
+  kubeEtcd:
+    enabled: false
+  kubeScheduler:
+    enabled: false
 # disabled AntiAffinity

View File

@ -151,7 +151,7 @@ jobs:
else else
echo "" echo ""
fi fi
helm template charts/pulsar --set kube-prometheus-stack.enabled=false --set components.pulsar_manager=true --kube-version $kube_version "$@" | \ helm template charts/pulsar --set victoria-metrics-k8s-stack.enabled=false --set components.pulsar_manager=true --kube-version $kube_version "$@" | \
kubeconform -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' -strict -kubernetes-version $kube_version -summary kubeconform -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' -strict -kubernetes-version $kube_version -summary
} }
set -o pipefail set -o pipefail
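Editor's note: the workflow's schema validation can be reproduced locally; a sketch using one Kubernetes version from the test matrix below (assumes helm and kubeconform are installed):

```bash
# Validate the rendered chart against Kubernetes and CRD schemas.
kube_version="1.25.16"
helm template charts/pulsar \
  --set victoria-metrics-k8s-stack.enabled=false \
  --set components.pulsar_manager=true \
  --kube-version "$kube_version" |
  kubeconform -schema-location default \
    -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' \
    -strict -kubernetes-version "$kube_version" -summary
```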
@@ -242,9 +242,9 @@ jobs:
           version: "1.25.16"
           kind_image_tag: v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025
         testScenario:
-          name: "Upgrade kube-prometheus-stack for previous LTS"
-          values_file: .ci/clusters/values-prometheus-grafana.yaml --values .ci/clusters/values-pulsar-previous-lts.yaml
-          shortname: prometheus-grafana
+          name: "Upgrade victoria-metrics-k8s-stack for previous LTS"
+          values_file: .ci/clusters/values-victoria-metrics-grafana.yaml --values .ci/clusters/values-pulsar-previous-lts.yaml
+          shortname: victoria-metrics-grafana
           type: upgrade
           upgradeFromVersion: 3.2.0
       - k8sVersion:

View File

@@ -141,7 +141,7 @@ This Helm Chart includes all the components of Apache Pulsar for a complete experience.
 - [x] Management & monitoring components:
   - [x] Pulsar Manager
   - [x] Optional PodMonitors for each component (enabled by default)
-  - [x] [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) (as of 3.0.0)
+  - [x] [victoria-metrics-k8s-stack](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack) (as of 4.0.0)

 It includes support for:
@@ -276,25 +276,26 @@ You can also check out the example values files for different deployments.
 - [Deploy a Pulsar cluster with JWT authentication using symmetric key](examples/values-jwt-symmetric.yaml)
 - [Deploy a Pulsar cluster with JWT authentication using asymmetric key](examples/values-jwt-asymmetric.yaml)

-## Disabling Kube-Prometheus-Stack CRDs
+## Disabling victoria-metrics-k8s-stack components

-In order to disable the kube-prometheus-stack fully, it is necessary to add the following to your `values.yaml`:
+To disable the victoria-metrics-k8s-stack entirely, add the following to your `values.yaml`.
+Victoria Metrics components can also be enabled or disabled individually if you only need specific monitoring features.
+Please refer to the default [`values.yaml`](charts/pulsar/values.yaml).

 ```yaml
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
-  prometheusOperator:
+  victoria-metrics-operator:
+    enabled: false
+  kube-state-metrics:
+    enabled: false
+  prometheus-node-exporter:
     enabled: false
   grafana:
     enabled: false
-  alertmanager:
-    enabled: false
-  prometheus:
-    enabled: false
 ```

-Otherwise, the helm chart installation will attempt to install the CRDs for the kube-prometheus-stack. Additionally,
-you'll need to disable each of the component's `PodMonitors`. This is shown in some [examples](./examples) and is
+Additionally, you'll need to set each component's `podMonitor` property to `false`. This is shown in some [examples](./examples) and is
 verified in some [tests](./.ci/clusters).
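Editor's note: the per-component switches can also be flipped at install time instead of editing `values.yaml`; an illustrative example:

```bash
# Disable the scrape objects (VMPodScrape/PodMonitor) for selected components.
helm upgrade --install pulsar apachepulsar/pulsar \
  --set broker.podMonitor.enabled=false \
  --set proxy.podMonitor.enabled=false
```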
 ## Pulsar Manager
@@ -319,12 +320,12 @@ kubectl get secret -l component=pulsar-manager -o=jsonpath="{.items[0].data.UI_PASSWORD}"
 ## Grafana Dashboards

-The Apache Pulsar Helm Chart uses the `kube-prometheus-stack` Helm Chart to deploy Grafana.
+The Apache Pulsar Helm Chart uses the `victoria-metrics-k8s-stack` Helm Chart to deploy Grafana.

-There are several ways to configure Grafana dashboards. The default `values.yaml` comes with examples of Pulsar dashboards which get downloaded from the Apache-2.0 licensed [streamnative/apache-pulsar-grafana-dashboard OSS project](https://github.com/streamnative/apache-pulsar-grafana-dashboard) by URL.
-Dashboards can be configured in `values.yaml` or by adding `ConfigMap` items with the label `grafana_dashboard: "1"`.
-In `values.yaml`, it's possible to include dashboards by URL or by grafana.com dashboard id (`gnetId` and `revision`).
+There are several ways to configure Grafana dashboards. The default [`values.yaml`](charts/pulsar/values.yaml) comes with examples of Pulsar dashboards which get downloaded from the Apache-2.0 licensed [lhotari/pulsar-grafana-dashboards OSS project](https://github.com/lhotari/pulsar-grafana-dashboards) by URL.
+Dashboards can be configured in [`values.yaml`](charts/pulsar/values.yaml) or by adding `ConfigMap` items with the label `grafana_dashboard: "1"`.
+In [`values.yaml`](charts/pulsar/values.yaml), it's possible to include dashboards by URL or by grafana.com dashboard id (`gnetId` and `revision`).
 Please see the [Grafana Helm chart documentation for importing dashboards](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md#import-dashboards).

 You can connect to Grafana by forwarding port 3000
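Editor's note: an example of the port-forward (the service name `pulsar-grafana` assumes a release named `pulsar`; the Grafana chart's service listens on port 80 by default):

```bash
kubectl port-forward -n pulsar svc/pulsar-grafana 3000:80
# then open http://localhost:3000 and log in with the admin password
```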
@@ -354,6 +355,8 @@ updates should be done using `helm upgrade`.
 ```bash
 helm repo add apachepulsar https://pulsar.apache.org/charts
 helm repo update
+# If you are using the provided victoria-metrics-k8s-stack for monitoring, this installs or upgrades the required CRDs
+./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
 # get the existing values.yaml used for the most recent deployment
 helm get values -n <namespace> <pulsar-release-name> > values.yaml
 # upgrade the deployment
@@ -362,65 +365,29 @@ helm upgrade -n <namespace> -f values.yaml <pulsar-release-name> apachepulsar/pulsar
 For more detailed information, see our [Upgrading](http://pulsar.apache.org/docs/helm-upgrade/) guide.

-## Upgrading from Helm Chart version 3.x.x to 4.0.0 version and above
+## Upgrading from Helm Chart versions before 4.0.0 to 4.0.0 and above

 ### Pulsar Proxy service's default type has been changed from `LoadBalancer` to `ClusterIP`

 Please check the section "External Access Recommendations" for guidance and also check the security advisory section.
 You will need to configure keys under `proxy.service` in your `values.yaml` to preserve existing functionality since the default has been changed.

-### kube-prometheus-stack upgrade
+### kube-prometheus-stack replaced with victoria-metrics-k8s-stack

-The kube-prometheus-stack version has been upgraded to 69.x.x in Pulsar Helm Chart version 4.0.0 .
-Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
-in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-68x-to-69x).
+kube-prometheus-stack has been removed in Pulsar Helm Chart version 4.0.0 because Prometheus has been incompatible
+with Pulsar metrics since Pulsar 2.11.0. Pulsar exposes metrics in a format that partially follows the OpenMetrics 1.0.0
+text format but isn't fully compatible with it. Prometheus doesn't properly support the OpenMetrics 1.0.0 text format,
+even in Prometheus 3.2.1, which was tested extensively before switching to Victoria Metrics in Pulsar Helm chart
+version 4.0.0. Victoria Metrics is Apache 2.0 licensed OSS and is fully compatible with Prometheus.

-There's a script to run the required commands:
+Before upgrading to Pulsar Helm Chart version 4.0.0, it is recommended to first disable kube-prometheus-stack while
+still on the currently deployed chart version:

 ```shell
-./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.80.0
+# get the existing values.yaml used for the most recent deployment
+helm get values -n <namespace> <pulsar-release-name> > values.yaml
+# disable kube-prometheus-stack in the currently used version before upgrading to Pulsar Helm chart 4.0.0
+helm upgrade -n <namespace> -f values.yaml --version <your-current-chart-version> --set kube-prometheus-stack.enabled=false <pulsar-release-name> apachepulsar/pulsar
 ```

-After, this you can proceed with `helm upgrade`.
-
-## Upgrading from Helm Chart version 3.0.0-3.6.0 to 3.7.0 version and above
-
-The kube-prometheus-stack version has been upgraded to 65.x.x in Pulsar Helm Chart version 3.7.0 .
-Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
-in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-64x-to-65x).
-
-There's a script to run the required commands:
-
-```shell
-./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.77.1
-```
-
-After, this you can proceed with `helm upgrade`.
-
-## Upgrading from Helm Chart version 3.0.0-3.4.x to 3.5.0 version and above
-
-The kube-prometheus-stack version has been upgraded to 59.x.x in Pulsar Helm Chart version 3.5.0 .
-Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
-in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-58x-to-59x).
-
-There's a script to run the required commands:
-
-```shell
-./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.74.0
-```
-
-After, this you can proceed with `helm upgrade`.
-
-## Upgrading from Helm Chart version 3.0.0-3.2.x to 3.3.0 version and above
-
-The kube-prometheus-stack version has been upgraded to 56.x.x in Pulsar Helm Chart version 3.3.0 .
-Before running "helm upgrade", you should first upgrade the Prometheus Operator CRDs as [instructed
-in kube-prometheus-stack upgrade notes](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-55x-to-56x).
-
-There's a script to run the required commands:
-
-```shell
-./scripts/kube-prometheus-stack/upgrade_prometheus_operator_crds.sh 0.71.0
-```

 After this, you can proceed with `helm upgrade`.
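Editor's note: putting the steps above together, an illustrative end-to-end upgrade (namespace, release name, and current chart version are placeholders):

```bash
helm repo update
# 1. disable kube-prometheus-stack while still on the currently deployed chart version
helm get values -n <namespace> <pulsar-release-name> > values.yaml
helm upgrade -n <namespace> -f values.yaml --version <your-current-chart-version> \
  --set kube-prometheus-stack.enabled=false <pulsar-release-name> apachepulsar/pulsar
# 2. install or upgrade the Victoria Metrics operator CRDs
./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh
# 3. upgrade to the new chart version
helm upgrade -n <namespace> -f values.yaml --version 4.0.0 <pulsar-release-name> apachepulsar/pulsar
```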
@@ -430,7 +397,7 @@ After this, you can proceed with `helm upgrade`.
 The 2.10.0+ Apache Pulsar docker image is a non-root container, by default. That complicates an upgrade to 2.10.0
 because the existing files are owned by the root user but are not writable by the root group. In order to leverage this
 new security feature, the Bookkeeper and Zookeeper StatefulSet [securityContexts](https://kubernetes.io/docs/tasks/configure-pod-container/security-context)
-are configurable in the `values.yaml`. They default to:
+are configurable in the [`values.yaml`](charts/pulsar/values.yaml). They default to:

 ```yaml
 securityContext:
@@ -478,6 +445,7 @@ After this, you can proceed with `helm upgrade`.
 ### Recovering from `helm upgrade` error "unable to build kubernetes objects from current release manifest"

 Example of the error message:
+
 ```bash
 Error: UPGRADE FAILED: unable to build kubernetes objects from current release manifest:
 [resource mapping not found for name: "pulsar-bookie" namespace: "pulsar" from "":

View File

@@ -32,7 +32,7 @@ maintainers:
 - name: The Apache Pulsar Team
   email: dev@pulsar.apache.org
 dependencies:
-- name: kube-prometheus-stack
-  version: 67.x.x
-  repository: https://prometheus-community.github.io/helm-charts
-  condition: kube-prometheus-stack.enabled
+- name: victoria-metrics-k8s-stack
+  version: 0.38.x
+  repository: https://victoriametrics.github.io/helm-charts/
+  condition: victoria-metrics-k8s-stack.enabled
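Editor's note: after a dependency change like this, the chart lock file and bundled dependencies need refreshing; the standard commands:

```bash
helm repo add vm https://victoriametrics.github.io/helm-charts/
helm dependency update charts/pulsar
```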

View File

@ -0,0 +1,86 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{- define "pulsar.podMonitor" -}}
{{- $root := index . 0 }}
{{- $component := index . 1 }}
{{- $matchLabel := index . 2 }}
{{- $portName := "http" }}
{{- if gt (len .) 3 }}
{{- $portName = index . 3 }}
{{- end }}
{{/* Extract component parts for nested values */}}
{{- $componentParts := splitList "." $component }}
{{- $valuesPath := $root.Values }}
{{- range $componentParts }}
{{- $valuesPath = index $valuesPath . }}
{{- end }}
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
{{- else }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
{{- end }}
metadata:
name: {{ template "pulsar.fullname" $root }}-{{ replace "." "-" $component }}
labels:
app: {{ template "pulsar.name" $root }}
chart: {{ template "pulsar.chart" $root }}
release: {{ $root.Release.Name }}
heritage: {{ $root.Release.Service }}
spec:
jobLabel: {{ replace "." "-" $component }}
podMetricsEndpoints:
- port: {{ $portName }}
path: /metrics
scheme: http
interval: {{ $valuesPath.podMonitor.interval }}
scrapeTimeout: {{ $valuesPath.podMonitor.scrapeTimeout }}
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
relabelConfigs:
{{- else }}
relabelings:
{{- end }}
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_label_component]
action: replace
targetLabel: job
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: kubernetes_pod_name
{{- with $valuesPath.podMonitor.metricRelabelings }}
{{- if index $root.Values "victoria-metrics-k8s-stack" "enabled" }}
metricRelabelConfigs:
{{- else }}
metricRelabelings:
{{- end }}
{{ toYaml . | indent 8 }}
{{- end }}
selector:
matchLabels:
{{- include "pulsar.matchLabels" $root | nindent 6 }}
{{ $matchLabel }}
{{- end -}}
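Editor's note: a quick way to verify which scrape resource the helper above emits (a sketch; assumes chart dependencies have been built with `helm dependency update charts/pulsar`):

```bash
# With the VM stack enabled the helper renders VMPodScrape objects...
helm template charts/pulsar --set victoria-metrics-k8s-stack.enabled=true | grep 'kind: VMPodScrape'
# ...and with it disabled it falls back to Prometheus Operator PodMonitor objects.
helm template charts/pulsar --set victoria-metrics-k8s-stack.enabled=false | grep 'kind: PodMonitor'
```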

View File

@@ -17,43 +17,7 @@
 # under the License.
 #
-# deploy broker PodMonitor only when `$.Values.autorecovery.podMonitor.enabled` is true
+# deploy autorecovery PodMonitor only when `$.Values.autorecovery.podMonitor.enabled` is true
 {{- if $.Values.autorecovery.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.name" . }}-recovery
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: recovery
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: http
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.autorecovery.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.autorecovery.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.autorecovery.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.autorecovery.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      component: {{ .Values.autorecovery.component }}
+{{- include "pulsar.podMonitor" (list . "autorecovery" (printf "component: %s" .Values.autorecovery.component)) }}
 {{- end }}

View File

@@ -19,41 +19,5 @@
 # deploy bookkeeper PodMonitor only when `$.Values.bookkeeper.podMonitor.enabled` is true
 {{- if $.Values.bookkeeper.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-bookie
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: bookie
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: http
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.bookkeeper.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.bookkeeper.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.bookkeeper.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.bookkeeper.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      component: bookie
+{{- include "pulsar.podMonitor" (list . "bookkeeper" (printf "component: %s" .Values.bookkeeper.component)) }}
 {{- end }}

View File

@@ -19,41 +19,5 @@
 # deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
 {{- if $.Values.broker.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-broker
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: broker
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: http
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.broker.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.broker.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.broker.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.broker.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      component: broker
+{{- include "pulsar.podMonitor" (list . "broker" (printf "component: %s" .Values.broker.component)) }}
 {{- end }}

View File

@@ -17,43 +17,7 @@
 # under the License.
 #
-# deploy oxia-coordinator PodMonitor only when `$.Values.oxia.podMonitor.enabled` is true
+# deploy oxia-coordinator PodMonitor only when `$.Values.oxia.coordinator.podMonitor.enabled` is true
 {{- if and $.Values.components.oxia $.Values.oxia.coordinator.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-oxia-coordinator
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: oxia-coordinator
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: metrics
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.oxia.coordinator.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.oxia.coordinator.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.oxia.coordinator.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.oxia.coordinator.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      app.kubernetes.io/component: oxia-coordinator
+{{- include "pulsar.podMonitor" (list . "oxia.coordinator" "app.kubernetes.io/component: oxia-coordinator" "metrics") }}
 {{- end }}

View File

@@ -17,43 +17,7 @@
 # under the License.
 #
-# deploy oxia-server PodMonitor only when `$.Values.oxia.podMonitor.enabled` is true
+# deploy oxia-server PodMonitor only when `$.Values.oxia.server.podMonitor.enabled` is true
 {{- if and $.Values.components.oxia $.Values.oxia.server.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-oxia-server
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: oxia-server
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: metrics
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.oxia.server.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.oxia.server.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.oxia.server.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.oxia.server.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      app.kubernetes.io/component: oxia-server
+{{- include "pulsar.podMonitor" (list . "oxia.server" "app.kubernetes.io/component: oxia-server" "metrics") }}
 {{- end }}

View File

@@ -19,41 +19,5 @@
 # deploy proxy PodMonitor only when `$.Values.proxy.podMonitor.enabled` is true
 {{- if $.Values.proxy.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-proxy
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: proxy
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: http
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.proxy.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.proxy.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.proxy.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.proxy.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      component: proxy
+{{- include "pulsar.podMonitor" (list . "proxy" (printf "component: %s" .Values.proxy.component) "sts-http") }}
 {{- end }}

View File

@@ -20,42 +20,6 @@
 # deploy zookeeper PodMonitor only when `$.Values.zookeeper.podMonitor.enabled` is true
 {{- if .Values.components.zookeeper }}
 {{- if $.Values.zookeeper.podMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PodMonitor
-metadata:
-  name: {{ template "pulsar.fullname" . }}-zookeeper
-  labels:
-    app: {{ template "pulsar.name" . }}
-    chart: {{ template "pulsar.chart" . }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  jobLabel: zookeeper
-  fallbackScrapeProtocol: PrometheusText0.0.4
-  podMetricsEndpoints:
-  - port: http
-    path: /metrics
-    scheme: http
-    interval: {{ $.Values.zookeeper.podMonitor.interval }}
-    scrapeTimeout: {{ $.Values.zookeeper.podMonitor.scrapeTimeout }}
-    relabelings:
-    - action: labelmap
-      regex: __meta_kubernetes_pod_label_(.+)
-    - sourceLabels: [__meta_kubernetes_namespace]
-      action: replace
-      targetLabel: kubernetes_namespace
-    - sourceLabels: [__meta_kubernetes_pod_label_component]
-      action: replace
-      targetLabel: job
-    - sourceLabels: [__meta_kubernetes_pod_name]
-      action: replace
-      targetLabel: kubernetes_pod_name
-    {{- if $.Values.zookeeper.podMonitor.metricRelabelings }}
-    metricRelabelings: {{ toYaml $.Values.zookeeper.podMonitor.metricRelabelings | nindent 8 }}
-    {{- end }}
-  selector:
-    matchLabels:
-      {{- include "pulsar.matchLabels" . | nindent 6 }}
-      component: zookeeper
+{{- include "pulsar.podMonitor" (list . "zookeeper" (printf "component: %s" .Values.zookeeper.component)) }}
 {{- end }}
 {{- end }}

View File

@@ -25,10 +25,8 @@
 ### Note: Prefer using helm's --namespace flag with --create-namespace instead
 ## The namespace to use to deploy the Pulsar components. If left empty,
 ## it will default to .Release.Namespace (aka helm --namespace).
-## Please note that kube-prometheus-stack will not be able to scrape Pulsar component metrics by default unless
-## it is deployed in the same namespace as Pulsar. The kube-prometheus-stack namespace can be configured by setting
-## the kube-prometheus-stack.namespaceOverride key to match Pulsar's namespace.
-## More details are provided in the comments for the kube-prometheus-stack.namespaceOverride key later in this file.
+## Please note that victoria-metrics-k8s-stack might not be able to scrape Pulsar component metrics by default unless
+## it is deployed in the same namespace as Pulsar.
 namespace: ""
 namespaceCreate: false
@@ -340,7 +338,7 @@ zookeeper:
     type: RollingUpdate
   podManagementPolicy: Parallel
   initContainers: []
-  # This is how prometheus discovers this component
+  # This is how Victoria Metrics or Prometheus discovers this component
  podMonitor:
     enabled: true
     interval: 60s
@@ -501,7 +499,7 @@ oxia:
   replicationFactor: 3
   ## templates/coordinator-deployment.yaml
   coordinator:
-    # This is how prometheus discovers this component
+    # This is how Victoria Metrics or Prometheus discovers this component
     podMonitor:
       enabled: true
       interval: 60s
@@ -523,7 +521,7 @@ oxia:
     # cloud.google.com/gke-nodepool: default-pool
   ## templates/server-statefulset.yaml
   server:
-    # This is how prometheus discovers this component
+    # This is how Victoria Metrics or Prometheus discovers this component
     podMonitor:
       enabled: true
       interval: 60s
@@ -596,7 +594,7 @@ bookkeeper:
     type: RollingUpdate
   podManagementPolicy: Parallel
   initContainers: []
-  # This is how prometheus discovers this component
+  # This is how Victoria Metrics or Prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -807,7 +805,7 @@ autorecovery:
   component: recovery
   replicaCount: 1
   initContainers: []
-  # This is how prometheus discovers this component
+  # This is how Victoria Metrics or Prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -972,7 +970,7 @@ broker:
   # The podManagementPolicy cannot be modified for an existing deployment. If you need to change this value, you will need to manually delete the existing broker StatefulSet and then redeploy the chart.
   podManagementPolicy:
   initContainers: []
-  # This is how prometheus discovers this component
+  # This is how Victoria Metrics or Prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -1219,7 +1217,7 @@ proxy:
     metrics: ~
     behavior: ~
   initContainers: []
-  # This is how prometheus discovers this component
+  # This is how Victoria Metrics or Prometheus discovers this component
   podMonitor:
     enabled: true
     interval: 60s
@@ -1463,115 +1461,222 @@ toolset:
   additionalCommand:

 #############################################################
-### Monitoring Stack : kube-prometheus-stack chart
+### Monitoring Stack : victoria-metrics-k8s-stack chart
 #############################################################
-## Prometheus, Grafana, and the rest of the kube-prometheus-stack are managed by the dependent chart here:
-## https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
-## For sample values, please see their documentation.
-kube-prometheus-stack:
-  ## Enable the kube-prometheus-stack chart
+## Victoria Metrics, Grafana, and the rest of the monitoring stack are managed by the dependent chart here:
+## https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack
+## For sample values, please see: https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/values.yaml
+victoria-metrics-k8s-stack:
+  ## Enable the victoria-metrics-k8s-stack chart
   enabled: true
-  ## This applies to deployments which don't use helm's --namespace flag to set the namespace.
-  ## If Pulsar's namespace is manually set using the `namespace` key, this setting should match the same namespace,
-  ## otherwise Prometheus will not be able to scrape the Pulsar metrics due to RBAC restrictions.
-  ## See https://prometheus-operator.dev/kube-prometheus/kube/monitoring-other-namespaces/ if you need to install
-  ## kube-prometheus-stack in a different namespace than Pulsar.
-  # namespaceOverride: ""
-  ## Manages Prometheus and Alertmanager components
-  prometheusOperator:
+  ## VictoriaMetrics Operator dependency chart configuration
+  victoria-metrics-operator:
     enabled: true
-  ## Prometheus component
-  prometheus:
-    enabled: true
+    # Install CRDs for VictoriaMetrics Operator
+    crds:
+      plain: true
+    operator:
+      ## By default, operator is configured to not convert Prometheus Operator monitoring.coreos.com/v1 objects
+      ## to Victoria Metrics operator operator.victoriametrics.com/v1beta1 objects.
+      # Enable this if you want to use Prometheus Operator objects for other purposes.
+      disable_prometheus_converter: true
+  ## Single-node VM instance
+  vmsingle:
+    enabled: true
+    ## -- Full spec for VMSingle CRD. Allowed values are described [here](https://docs.victoriametrics.com/operator/api#vmsinglespec)
+    spec:
+      retentionPeriod: "10d"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 50Gi
+  ## VM Agent for scraping metrics
+  vmagent:
+    enabled: true
+    ## Minikube specific settings - uncomment when using minikube
+    # spec:
+    #   volumes:
+    #     - hostPath:
+    #         path: /var/lib/minikube/certs/etcd
+    #         type: DirectoryOrCreate
+    #       name: etcd-certs
+    #   volumeMounts:
+    #     - mountPath: /var/lib/minikube/certs/etcd
+    #       name: etcd-certs
+  ## VM Alert for alerting rules - disabled by default
+  vmalert:
+    enabled: false
+  ## Alertmanager component - disabled by default
+  alertmanager:
+    enabled: false
   ## Grafana component
+  ## Refer to https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
   grafana:
     enabled: true
     # Use random password at installation time for Grafana by default by setting empty value to `adminPassword`.
     # You can find out the actual password by running the following command:
     # kubectl get secret -l app.kubernetes.io/name=grafana -o=jsonpath="{.items[0].data.admin-password}" | base64 --decode
     adminPassword:
-    # Configure Pulsar dashboards for Grafana
+    persistence:
+      enabled: true
+      size: 5Gi
+    ## Disable Grafana sidecar dashboards
+    ## since this cannot be enabled at the same time as dashboards are enabled
+    sidecar:
+      dashboards:
+        enabled: false
+    ## Configure Pulsar dashboards for Grafana
     dashboardProviders:
       dashboardproviders.yaml:
         apiVersion: 1
         providers:
-          - name: 'pulsar'
+          - name: oxia
             orgId: 1
-            folder: 'Pulsar'
+            folder: Oxia
             type: file
-            disableDeletion: true
+            disableDeletion: false
             editable: true
+            allowUiUpdates: true
+            options:
+              path: /var/lib/grafana/dashboards/oxia
+          - name: pulsar
+            orgId: 1
+            folder: Pulsar
+            type: file
+            disableDeletion: false
+            editable: true
+            allowUiUpdates: true
             options:
               path: /var/lib/grafana/dashboards/pulsar
     dashboards:
+      oxia:
+        oxia-containers:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-containers.json
+        oxia-coordinator:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-coordinator.json
+        oxia-golang:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-golang.json
+        oxia-grpc:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-grpc.json
+        oxia-nodes:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-nodes.json
+        oxia-overview:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-overview.json
+        oxia-shards:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/oxia/oxia-shards.json
       pulsar:
-        # Download the maintained dashboards from AL 2.0 licenced repo https://github.com/streamnative/apache-pulsar-grafana-dashboard
+        bookkeeper-compaction:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper-compaction.json
+        bookkeeper-read-cache:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper-read-cache.json
+        bookkeeper-read-use:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper-read-use.json
         bookkeeper:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/bookkeeper.json
-          datasource: Prometheus
-        broker:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/broker.json
-          datasource: Prometheus
-        connector_sink:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_sink.json
-          datasource: Prometheus
-        connector_source:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/connector_source.json
-          datasource: Prometheus
-        container:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/container.json
-          datasource: Prometheus
-        functions:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/functions.json
-          datasource: Prometheus
-        jvm:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/jvm.json
-          datasource: Prometheus
-        loadbalance:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/loadbalance.json
-          datasource: Prometheus
-        messaging:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/messaging.json
-          datasource: Prometheus
-        node:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/node.json
-          datasource: Prometheus
-        overview:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/overview.json
-          datasource: Prometheus
-        proxy:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/proxy.json
-          datasource: Prometheus
-        recovery:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/recovery.json
-          datasource: Prometheus
-        topic:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/topic.json
-          datasource: Prometheus
-        transaction:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/transaction.json
-          datasource: Prometheus
-        zookeeper:
-          url: https://raw.githubusercontent.com/streamnative/apache-pulsar-grafana-dashboard/master/dashboards.kubernetes/zookeeper-3.6.json
-          datasource: Prometheus
-        offloader:
-          url: https://raw.githubusercontent.com/apache/pulsar/refs/heads/master/grafana/dashboards/offloader.json
-          datasource: Prometheus
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/bookkeeper.json
+        broker-cache-by-broker:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/broker-cache-by-broker.json
         broker-cache:
-          url: https://raw.githubusercontent.com/datastax/pulsar-helm-chart/refs/heads/master/helm-chart-sources/pulsar/grafana-dashboards/broker-cache-by-broker.json
-          datasource: Prometheus
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/broker-cache.json
+        connector-sink:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/connector-sink.json
+        connector-source:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/connector-source.json
+        functions:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/functions.json
+        jvm:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/jvm.json
+        load-balancing:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/load-balancing.json
+        messaging:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/messaging.json
+        namespace:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/namespace.json
+        node:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/node.json
+        offloader:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/offloader.json
+        overview-by-broker:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/overview-by-broker.json
+        overview:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/overview.json
+        proxy:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/proxy.json
         sockets:
-          url: https://raw.githubusercontent.com/datastax/pulsar-helm-chart/refs/heads/master/helm-chart-sources/pulsar/grafana-dashboards/sockets.json
-          datasource: Prometheus
-  ## Prometheus node exporter component
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/sockets.json
+        tenant:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/tenant.json
+        topic:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/topic.json
+        zookeeper:
+          url: https://raw.githubusercontent.com/lhotari/pulsar-grafana-dashboards/master/pulsar/zookeeper.json
+  ## Node exporter component
   prometheus-node-exporter:
     enabled: true
     hostRootFsMount:
       enabled: false
-  ## Alertmanager component
-  alertmanager:
-    enabled: false
+  ## Kube state metrics component
+  kube-state-metrics:
+    enabled: true
+  ## Components scraping Kubernetes services
+  kubelet:
+    enabled: true
+  kubeApiServer:
+    enabled: true
+  kubeControllerManager:
+    enabled: true
+    ## Additional settings for minikube environments
+    vmScrape:
+      spec:
+        endpoints:
+          - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+            port: http-metrics
+            scheme: https
+            tlsConfig:
+              caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+              insecureSkipVerify: true # For development environments like minikube
+  coreDns:
+    enabled: true
+  kubeEtcd:
+    enabled: true
+    ## Minikube specific settings - uncomment or adjust when using minikube
+    # service:
+    #   port: 2381
+    #   targetPort: 2381
+    # vmScrape:
+    #   spec:
+    #     endpoints:
+    #       - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    #         port: http-metrics
+    #         scheme: http # Minikube often uses http instead of https for etcd
+  kubeScheduler:
+    enabled: true
+    ## Additional settings for minikube environments
+    vmScrape:
+      spec:
+        endpoints:
+          - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+            port: http-metrics
+            scheme: https
+            tlsConfig:
+              caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+              insecureSkipVerify: true # For development environments like minikube

 ## Components Stack: pulsar_manager
 ## templates/pulsar-manager.yaml
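Editor's note: illustrative overrides for the monitoring defaults above, e.g. growing the VMSingle retention window and storage request without editing `values.yaml`:

```bash
helm upgrade --install pulsar apachepulsar/pulsar \
  --set victoria-metrics-k8s-stack.vmsingle.spec.retentionPeriod=30d \
  --set victoria-metrics-k8s-stack.vmsingle.spec.storage.resources.requests.storage=100Gi
```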

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -28,7 +28,7 @@ components:
   pulsar_manager: true

 ## disable monitoring stack
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@@ -37,7 +37,7 @@ components:
   pulsar_manager: false

 ## disable monitoring stack
-kube-prometheus-stack:
+victoria-metrics-k8s-stack:
   enabled: false
   prometheusOperator:
     enabled: false

View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script is used to upgrade the Victoria Metrics Operator CRDs before running "helm upgrade"
VM_OPERATOR_VERSION="${1:-"0.42.4"}"
kubectl apply --server-side --force-conflicts -f "https://github.com/VictoriaMetrics/operator/releases/download/v${VM_OPERATOR_VERSION}/crd.yaml"
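Editor's note: usage of the script above; run it with no arguments to apply the CRDs for the default operator version, or pass a version explicitly:

```bash
./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh          # default version (0.42.4)
./scripts/victoria-metrics-k8s-stack/upgrade_vm_operator_crds.sh 0.42.4   # pinned version
```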