Extend podmonitor and add relabels (#100)
### Motivation As I wanted to use [streamnative/apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard) with this helm chart and my own cluster-wide Prometheus stack, I decided that using the PodMonitor CRD is a good approach. Unfortunately, the Prometheus config contains some metric relabelings that are required by the Grafana dashboard. I decided to port them directly to the PodMonitor definitions. ### Modifications * Added missing PodMonitor for autorecovery * Ported relabelings from `prometheus-configmap.yaml` to each PodMonitor ### Verifying this change - [x] Make sure that the change passes the CI checks.
This commit is contained in:
parent
23ba8ac948
commit
025b263206
54
charts/pulsar/templates/autorecovery-podmonitor.yaml
Normal file
54
charts/pulsar/templates/autorecovery-podmonitor.yaml
Normal file
@ -0,0 +1,54 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
|
||||
{{- if $.Values.autorecovery.podMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: {{ template "pulsar.name" . }}-recovery
|
||||
labels:
|
||||
app: {{ template "pulsar.name" . }}
|
||||
chart: {{ template "pulsar.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
jobLabel: recovery
|
||||
podMetricsEndpoints:
|
||||
- port: http
|
||||
path: /metrics
|
||||
scheme: http
|
||||
interval: {{ $.Values.autorecovery.podMonitor.interval }}
|
||||
scrapeTimeout: {{ $.Values.autorecovery.podMonitor.scrapeTimeout }}
|
||||
relabelings:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
targetLabel: kubernetes_namespace
|
||||
- sourceLabels: [__meta_kubernetes_pod_label_component]
|
||||
action: replace
|
||||
targetLabel: job
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
targetLabel: kubernetes_pod_name
|
||||
selector:
|
||||
matchLabels:
|
||||
component: {{ .Values.autorecovery.component }}
|
||||
{{- end }}
|
||||
@ -36,6 +36,18 @@ spec:
|
||||
scheme: http
|
||||
interval: {{ $.Values.bookkeeper.podMonitor.interval }}
|
||||
scrapeTimeout: {{ $.Values.bookkeeper.podMonitor.scrapeTimeout }}
|
||||
relabelings:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
targetLabel: kubernetes_namespace
|
||||
- sourceLabels: [__meta_kubernetes_pod_label_component]
|
||||
action: replace
|
||||
targetLabel: job
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
targetLabel: kubernetes_pod_name
|
||||
selector:
|
||||
matchLabels:
|
||||
component: bookie
|
||||
|
||||
@ -36,6 +36,18 @@ spec:
|
||||
scheme: http
|
||||
interval: {{ $.Values.broker.podMonitor.interval }}
|
||||
scrapeTimeout: {{ $.Values.broker.podMonitor.scrapeTimeout }}
|
||||
relabelings:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
targetLabel: kubernetes_namespace
|
||||
- sourceLabels: [__meta_kubernetes_pod_label_component]
|
||||
action: replace
|
||||
targetLabel: job
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
targetLabel: kubernetes_pod_name
|
||||
selector:
|
||||
matchLabels:
|
||||
component: broker
|
||||
|
||||
@ -36,6 +36,18 @@ spec:
|
||||
scheme: http
|
||||
interval: {{ $.Values.proxy.podMonitor.interval }}
|
||||
scrapeTimeout: {{ $.Values.proxy.podMonitor.scrapeTimeout }}
|
||||
relabelings:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
targetLabel: kubernetes_namespace
|
||||
- sourceLabels: [__meta_kubernetes_pod_label_component]
|
||||
action: replace
|
||||
targetLabel: job
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
targetLabel: kubernetes_pod_name
|
||||
selector:
|
||||
matchLabels:
|
||||
component: proxy
|
||||
|
||||
@ -36,6 +36,18 @@ spec:
|
||||
scheme: http
|
||||
interval: {{ $.Values.zookeeper.podMonitor.interval }}
|
||||
scrapeTimeout: {{ $.Values.zookeeper.podMonitor.scrapeTimeout }}
|
||||
relabelings:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
targetLabel: kubernetes_namespace
|
||||
- sourceLabels: [__meta_kubernetes_pod_label_component]
|
||||
action: replace
|
||||
targetLabel: job
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
targetLabel: kubernetes_pod_name
|
||||
selector:
|
||||
matchLabels:
|
||||
component: zookeeper
|
||||
|
||||
@ -522,6 +522,12 @@ autorecovery:
|
||||
# so the metrics are correctly rendered in grafana dashboard
|
||||
component: recovery
|
||||
replicaCount: 1
|
||||
# If using Prometheus-Operator enable this PodMonitor to discover autorecovery scrape targets
|
||||
# # Prometheus-Operator does not add scrape targets based on k8s annotations
|
||||
podMonitor:
|
||||
enabled: false
|
||||
interval: 10s
|
||||
scrapeTimeout: 10s
|
||||
# True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
|
||||
restartPodsOnConfigMapChange: false
|
||||
ports:
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user