Extend podmonitor and add relabels (#100)
### Motivation

I wanted to use [streamnative/apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard) with this Helm chart and my own cluster-wide Prometheus stack, so relying on the PodMonitor CRD seemed like the right approach. Unfortunately, the bundled Prometheus config contains some metric relabelings that the Grafana dashboards require, so I decided to port them directly into the PodMonitor definitions. A minimal values override for enabling the PodMonitors is sketched below.

### Modifications

* Added the missing PodMonitor for autorecovery
* Ported the relabelings from `prometheus-configmap.yaml` to each PodMonitor

### Verifying this change

- [x] Make sure that the change passes the CI checks.
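For anyone pointing a cluster-wide Prometheus Operator at this chart, a values override along the following lines should switch the PodMonitors on. This is only a sketch: it assumes every component exposes the same `podMonitor.enabled`/`interval`/`scrapeTimeout` keys that the `autorecovery` section gains in this change (the broker template already gates on `$.Values.broker.podMonitor.enabled`).

```yaml
# Sketch of a values override enabling the PodMonitors.
# Key names are assumed to mirror the autorecovery.podMonitor block
# added in this PR; adjust intervals to your scrape budget.
autorecovery:
  podMonitor:
    enabled: true
    interval: 10s
    scrapeTimeout: 10s
broker:
  podMonitor:
    enabled: true
bookkeeper:
  podMonitor:
    enabled: true
proxy:
  podMonitor:
    enabled: true
zookeeper:
  podMonitor:
    enabled: true
```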
parent 23ba8ac948
commit 025b263206

charts/pulsar/templates/autorecovery-podmonitor.yaml (new file, 54 lines)
@@ -0,0 +1,54 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy autorecovery PodMonitor only when `$.Values.autorecovery.podMonitor.enabled` is true
{{- if $.Values.autorecovery.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ template "pulsar.name" . }}-recovery
  labels:
    app: {{ template "pulsar.name" . }}
    chart: {{ template "pulsar.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  jobLabel: recovery
  podMetricsEndpoints:
  - port: http
    path: /metrics
    scheme: http
    interval: {{ $.Values.autorecovery.podMonitor.interval }}
    scrapeTimeout: {{ $.Values.autorecovery.podMonitor.scrapeTimeout }}
    relabelings:
    - action: labelmap
      regex: __meta_kubernetes_pod_label_(.+)
    - sourceLabels: [__meta_kubernetes_namespace]
      action: replace
      targetLabel: kubernetes_namespace
    - sourceLabels: [__meta_kubernetes_pod_label_component]
      action: replace
      targetLabel: job
    - sourceLabels: [__meta_kubernetes_pod_name]
      action: replace
      targetLabel: kubernetes_pod_name
  selector:
    matchLabels:
      component: {{ .Values.autorecovery.component }}
{{- end }}
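For reference, the `relabelings` above are the PodMonitor counterpart of the rules applied by `prometheus-configmap.yaml` in the bundled Prometheus setup; expressed as a plain Prometheus scrape config they would look roughly like the sketch below (illustrative only, not a verbatim copy of the configmap):

```yaml
# Approximate scrape_config equivalent of the PodMonitor relabelings above.
# Field names follow Prometheus' relabel_configs syntax; the actual
# prometheus-configmap.yaml may differ in detail.
relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_label_component]
    action: replace
    target_label: job
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
```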

(bookkeeper PodMonitor template)
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.bookkeeper.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.bookkeeper.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: bookie

(broker PodMonitor template)
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.broker.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.broker.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: broker

(proxy PodMonitor template)
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.proxy.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.proxy.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: proxy

(zookeeper PodMonitor template)
@@ -36,6 +36,18 @@ spec:
     scheme: http
     interval: {{ $.Values.zookeeper.podMonitor.interval }}
     scrapeTimeout: {{ $.Values.zookeeper.podMonitor.scrapeTimeout }}
+    relabelings:
+    - action: labelmap
+      regex: __meta_kubernetes_pod_label_(.+)
+    - sourceLabels: [__meta_kubernetes_namespace]
+      action: replace
+      targetLabel: kubernetes_namespace
+    - sourceLabels: [__meta_kubernetes_pod_label_component]
+      action: replace
+      targetLabel: job
+    - sourceLabels: [__meta_kubernetes_pod_name]
+      action: replace
+      targetLabel: kubernetes_pod_name
   selector:
     matchLabels:
       component: zookeeper

(chart values.yaml, autorecovery section)
@@ -522,6 +522,12 @@ autorecovery:
   # so the metrics are correctly rendered in grafana dashboard
   component: recovery
   replicaCount: 1
+  # If using Prometheus-Operator enable this PodMonitor to discover autorecovery scrape targets
+  # # Prometheus-Operator does not add scrape targets based on k8s annotations
+  podMonitor:
+    enabled: false
+    interval: 10s
+    scrapeTimeout: 10s
   # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
   restartPodsOnConfigMapChange: false
   ports: