diff --git a/charts/pulsar/templates/autorecovery-podmonitor.yaml b/charts/pulsar/templates/autorecovery-podmonitor.yaml
new file mode 100644
index 0000000..21d9b9f
--- /dev/null
+++ b/charts/pulsar/templates/autorecovery-podmonitor.yaml
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# deploy autorecovery PodMonitor only when `$.Values.autorecovery.podMonitor.enabled` is true
+{{- if $.Values.autorecovery.podMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: {{ template "pulsar.name" . }}-recovery
+  labels:
+    app: {{ template "pulsar.name" . }}
+    chart: {{ template "pulsar.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  jobLabel: recovery
+  podMetricsEndpoints:
+  - port: http
+    path: /metrics
+    scheme: http
+    interval: {{ $.Values.autorecovery.podMonitor.interval }}
+    scrapeTimeout: {{ $.Values.autorecovery.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
+  selector:
+    matchLabels:
+      component: {{ .Values.autorecovery.component }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/pulsar/templates/bookkeeper-podmonitor.yaml b/charts/pulsar/templates/bookkeeper-podmonitor.yaml
index bec86fe..45a4aad 100644
--- a/charts/pulsar/templates/bookkeeper-podmonitor.yaml
+++ b/charts/pulsar/templates/bookkeeper-podmonitor.yaml
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.bookkeeper.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.bookkeeper.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: bookie
diff --git a/charts/pulsar/templates/broker-podmonitor.yaml b/charts/pulsar/templates/broker-podmonitor.yaml
index c3474b2..515d218 100644
--- a/charts/pulsar/templates/broker-podmonitor.yaml
+++ b/charts/pulsar/templates/broker-podmonitor.yaml
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.broker.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.broker.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: broker
diff --git a/charts/pulsar/templates/proxy-podmonitor.yaml b/charts/pulsar/templates/proxy-podmonitor.yaml
index bf925bf..18fd9ed 100644
--- a/charts/pulsar/templates/proxy-podmonitor.yaml
+++ b/charts/pulsar/templates/proxy-podmonitor.yaml
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.proxy.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.proxy.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: proxy
diff --git a/charts/pulsar/templates/zookeeper-podmonitor.yaml b/charts/pulsar/templates/zookeeper-podmonitor.yaml
index 73b1267..0ca8853 100644
--- a/charts/pulsar/templates/zookeeper-podmonitor.yaml
+++ b/charts/pulsar/templates/zookeeper-podmonitor.yaml
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.zookeeper.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.zookeeper.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: zookeeper
diff --git a/charts/pulsar/values.yaml b/charts/pulsar/values.yaml
index 113ed9a..be05216 100644
--- a/charts/pulsar/values.yaml
+++ b/charts/pulsar/values.yaml
@@ -522,6 +522,12 @@ autorecovery:
   # so the metrics are correctly rendered in grafana dashboard
   component: recovery
   replicaCount: 1
+  # If using Prometheus-Operator, enable this PodMonitor to discover autorecovery scrape targets.
+  # Prometheus-Operator does not add scrape targets based on k8s annotations
+  podMonitor:
+    enabled: false
+    interval: 10s
+    scrapeTimeout: 10s
   # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
   restartPodsOnConfigMapChange: false
   ports:
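
For a Prometheus Operator setup, a minimal values override enabling the new autorecovery PodMonitor (and, analogously, the existing broker one) could look like the sketch below; the file name monitoring-values.yaml is illustrative, and the interval/timeout values simply repeat the chart defaults:

    # monitoring-values.yaml -- illustrative override enabling PodMonitor discovery
    autorecovery:
      podMonitor:
        enabled: true
        interval: 10s
        scrapeTimeout: 10s
    broker:
      podMonitor:
        enabled: true

Applied with, for example, helm upgrade <release> <chart> -f monitoring-values.yaml. The shared relabelings copy pod labels onto each series and set job, kubernetes_namespace, and kubernetes_pod_name, mirroring the label set that annotation-based scrape configs typically produce, so dashboards keyed on those labels should not need to change.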