roman-popenov 4d00b385ac [deployment][helm] Add Grafana ingress template (#6280)
### Motivation
Expose Grafana through a software ingress controller so that it can be reached via a load balancer.

#### Proposed solution:
Create an ingress template for Grafana so that it is picked up automatically if an ingress controller is running in the cluster. Alternative solutions are to expose Grafana as a NodePort service or as a LoadBalancer service.

### Modifications
Added a `grafana-ingress.yaml` template to the chart templates and an `ingress` section for Grafana in the values file.

### Verifying this change
1) Set `ingress.enabled` to `true` for Grafana in the values file and provide a hostname. This was tested with NGINX; another ingress controller can be used, but the ingress controller class in the template will need to be changed accordingly. See the values sketch below.
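
A minimal sketch of the override, assuming the chart's default `grafana.ingress` keys; the hostname is a placeholder:

```yaml
grafana:
  ingress:
    enabled: true
    hostname: grafana.example.com
```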

2) Add NGINX Helm repository :

```bash
helm repo add nginx-stable https://helm.nginx.com/stable
helm repo update
```
3) Install with Helm 3:

```bash
helm install nginx-ingress-ctrl nginx-stable/nginx-ingress
```

4) Follow the instructions for deploying Pulsar with Helm and run:
`helm install pulsar --values pulsar/values-mini.yaml ./pulsar/`.

5) Wait until all the services are up and running.
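
One way to watch the rollout from the command line (a sketch assuming the chart is installed into the default `pulsar` namespace):

```bash
# Watch pods in the pulsar namespace until they all report Running
kubectl get pods -n pulsar -w
```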

6) Verify that Grafana is accessible via its URL; a quick command-line check is sketched below.
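
A sketch of the check, assuming the hostname configured in step 1 and the default `/grafana` path:

```bash
# Confirm the ingress resource was created and has an address
kubectl get ingress -n pulsar

# Request the Grafana page through the ingress
curl -I http://<grafana-hostname>/grafana
```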


**Path settings**

Currently, the path defaults to `/grafana`. For that to work, the NGINX configuration (`nginx.conf`) must serve Grafana under the `/grafana` sub path; see https://grafana.com/docs/grafana/latest/installation/behind_proxy/ for details.

To avoid having to modify NGINX configuration files, `path` can be changed to `/`, but that path might conflict with other services being proxied in the cluster.
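
For reference, the chart's default ingress values handle the sub path with a rewrite annotation so that requests to `/grafana` are forwarded to Grafana at `/`. The sketch below mirrors the defaults in the values file; the hostname is a placeholder:

```yaml
grafana:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
      ingress.kubernetes.io/rewrite-target: /
    hostname: grafana.example.com
    path: /grafana
    port: 80
```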

#### Modules affected:
The changes in this PR affect deployment with the Helm charts. If `ingress.enabled` is set to `true` for Grafana in the values file, an ingress resource is created for the Grafana service.

### Documentation
This PR adds ingress capability for Grafana, which should be documented.

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
## Namespace to deploy pulsar
namespace: pulsar
namespaceCreate: yes
## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims, otherwise, for test
## purposes, they will be deployed with emptyDir
persistence: no
## If prometheus_persistence is enabled, prometheus will be deployed
## with PersistentVolumeClaims, otherwise, for test purposes, they
## will be deployed with emptyDir
prometheus_persistence: yes
prometheus_rbac: yes
## which extra components to deploy
extra:
  # Pulsar proxy
  proxy: yes
  # Bookkeeper auto-recovery
  autoRecovery: yes
  # Pulsar dashboard
  # Deprecated
  # Replace pulsar-dashboard with pulsar-manager
  dashboard: no
  # pulsar manager
  pulsar_manager: yes
  # Bastion pod for administrative commands
  bastion: yes
  # Monitoring stack (prometheus and grafana)
  monitoring: yes
## Which pulsar image to use
image:
  repository: apachepulsar/pulsar-all
  tag: latest
  pullPolicy: IfNotPresent
## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  component: zookeeper
  replicaCount: 3
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    data:
      name: data
      size: 20Gi
      ## If you already have an existent storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations:
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    ports:
    - name: server
      port: 2888
    - name: leader-election
      port: 3888
    - name: stats
      port: 2181
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depends on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc will only start to be
## deployed when the zookeeper cluster is ready and with the
## metadata deployed
zookeeperMetadata:
  component: zookeeper-metadata
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  component: bookkeeper
  replicaCount: 4
  updateStrategy:
    type: OnDelete
  podManagementPolicy: OrderedReady
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  volumes:
    journal:
      name: journal
      size: 50Gi
      ## If you already have an existent storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
    ledgers:
      name: ledgers
      size: 50Gi
      ## If you already have an existent storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      #   type: pd-ssd
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    BOOKIE_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -verbosegc -XX:G1LogLevel=finest\""
    BOOKIE_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    PULSAR_PREFIX_dbStorage_writeCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_readAheadCacheMaxSizeMb: "2048"
    PULSAR_PREFIX_dbStorage_rocksDB_blockCacheSize: "268435456"
    PULSAR_PREFIX_journalMaxSizeMB: "2048"
    PULSAR_PREFIX_statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
    PULSAR_PREFIX_useHostNameAsBookieID: "true"
  ## Bookkeeper service
  ## templates/bookkeeper-service.yaml
  ##
  service:
    annotations:
      publishNotReadyAddresses: "true"
    ports:
    - name: server
      port: 3181
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
broker:
  component: broker
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 15Gi
      cpu: 4
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms15g -Xmx15g -XX:MaxDirectMemorySize=15g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem\""
    PULSAR_GC: "\"-XX:+UseG1GC -XX:MaxGCPauseMillis=10\""
    PULSAR_PREFIX_managedLedgerDefaultEnsembleSize: "3"
    PULSAR_PREFIX_managedLedgerDefaultWriteQuorum: "3"
    PULSAR_PREFIX_managedLedgerDefaultAckQuorum: "2"
    PULSAR_PREFIX_deduplicationEnabled: "false"
    PULSAR_PREFIX_exposeTopicLevelMetricsInPrometheus: "true"
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: http
      port: 8080
    - name: pulsar
      port: 6650
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Extra: Proxy
## templates/proxy-deployment.yaml
##
proxy:
  component: proxy
  replicaCount: 3
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8080"
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Proxy configmap
  ## templates/proxy-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms4g -Xmx4g -XX:MaxDirectMemorySize=4g\""
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  service:
    annotations: {}
    type: NodePort
    ports:
    - name: http
      port: 8080
      nodePort: 30001
      protocol: TCP
    - name: tcp
      port: 6650
      nodePort: 30002
      protocol: TCP
  ## Proxy PodDisruptionBudget
  ## templates/proxy-pdb.yaml
  ##
  pdb:
    usePolicy: yes
    maxUnavailable: 1
## Pulsar Extra: Bookkeeper auto-recovery
## templates/autorecovery-deployment.yaml
##
autoRecovery:
  component: autorecovery
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\" -Xms1g -Xmx1g \""
## Pulsar Extra: Dashboard
## templates/dashboard-deployment.yaml
## Deprecated
##
dashboard:
  component: dashboard
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-dashboard
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Dashboard service
  ## templates/dashboard-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 80
  ingress:
    enabled: false
    annotations: {}
    tls:
      enabled: false
      ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
      secretName: ""
    ## Required if ingress is enabled
    hostname: ""
    path: "/"
    port: 80
## Pulsar Extra: Bastion
## templates/bastion-deployment.yaml
##
bastion:
  component: bastion
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  resources:
    requests:
      memory: 1Gi
      cpu: 250m
  ## Bastion configmap
  ## templates/bastion-configmap.yaml
  ##
  configData:
    PULSAR_MEM: "\"-Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g\""
## Monitoring Stack: Prometheus
## templates/prometheus-deployment.yaml
##
prometheus:
  component: prometheus
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: prom/prometheus
    tag: v1.6.3
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  volumes:
    data:
      name: data
      size: 50Gi
      ## If you already have an existent storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      #   type: pd-standard
      #   fsType: xfs
      #   provisioner: kubernetes.io/gce-pd
  ## Prometheus service
  ## templates/prometheus-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 9090
## Monitoring Stack: Grafana
## templates/grafana-deployment.yaml
##
grafana:
  component: grafana
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-grafana
    tag: latest
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 4Gi
      cpu: 1
  ## Grafana service
  ## templates/grafana-service.yaml
  ##
  service:
    annotations: {}
    ports:
    - name: server
      port: 3000
  plugins: []
  ## Grafana ingress
  ## templates/grafana-ingress.yaml
  ##
  ingress:
    enabled: false
    annotations:
      kubernetes.io/ingress.class: nginx
      # nginx.ingress.kubernetes.io/rewrite-target: /$1
      # ingress.kubernetes.io/force-ssl-redirect: "true"
      ingress.kubernetes.io/rewrite-target: /
    labels: {}
    tls: []
    ## Optional. Leave it blank if your Ingress Controller can provide a default certificate.
    #- secretName: ""
    ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
    extraPaths: []
    ## Required if ingress is enabled
    hostname: ""
    protocol: http
    path: /grafana
    port: 80
## Components Stack: pulsar_manager
## templates/pulsar-manager.yaml
##
pulsar_manager:
  component: pulsar-manager
  replicaCount: 1
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  annotations: {}
  tolerations: []
  gracePeriod: 0
  image:
    repository: apachepulsar/pulsar-manager
    tag: v0.1.0
    pullPolicy: IfNotPresent
  resources:
    requests:
      memory: 250Mi
      cpu: 0.1
  ## Pulsar manager service
  ## templates/pulsar-manager-service.yaml
  ##
  service:
    type: LoadBalancer
    annotations: {}
    ports:
    - name: server
      port: 9527
  admin:
    user: pulsar
    password: pulsar