[CI] Upgrade k8s to 1.18 and also upgrade helm, kind & chart releaser versions (#192)

Lari Hotari 2022-01-10 22:57:58 +02:00 committed by GitHub
parent cee3b5c5e6
commit 46689ab30d
18 changed files with 60 additions and 63 deletions

View File

@@ -32,9 +32,6 @@ source ${PULSAR_HOME}/.ci/helm.sh
# create cluster
ci::create_cluster
-# install storage provisioner
-ci::install_storage_provisioner
extra_opts=""
if [[ "x${SYMMETRIC}" == "xtrue" ]]; then
  extra_opts="-s"

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -80,4 +78,4 @@ auth:
    # proxy to broker communication
    proxy: "proxy-admin"
    # pulsar-admin client to broker/proxy communication
    client: "admin"

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -80,4 +78,4 @@ auth:
    # proxy to broker communication
    proxy: "proxy-admin"
    # pulsar-admin client to broker/proxy communication
    client: "admin"

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -61,4 +59,4 @@ proxy:
  replicaCount: 1
toolset:
  useProxy: false

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -78,4 +76,4 @@ tls:
# disable cert-manager
certs:
  internal_issuer:
    enabled: false

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -72,4 +70,4 @@ tls:
# disable cert manager
certs:
  internal_issuer:
    enabled: false

View File

@@ -23,8 +23,6 @@ monitoring:
  node_exporter: false
  alert_manager: false
-volumes:
-  local_storage: true
# disabled AntiAffinity
affinity:
@@ -74,4 +72,4 @@ tls:
# disable cert manager
certs:
  internal_issuer:
    enabled: false

View File

@@ -28,6 +28,7 @@ KUBECTL=${OUTPUT_BIN}/kubectl
NAMESPACE=pulsar
CLUSTER=pulsar-ci
CLUSTER_ID=$(uuidgen)
+export PATH="$OUTPUT_BIN:$PATH"
# brew package 'coreutils' is required on MacOSX
# coreutils includes the 'timeout' command
@@ -63,21 +64,6 @@ function ci::delete_cluster() {
  echo "Successfully delete a kind cluster."
}
-function ci::install_storage_provisioner() {
-  echo "Installing the local storage provisioner ..."
-  ${HELM} repo add streamnative https://charts.streamnative.io
-  ${HELM} repo update
-  ${HELM} install local-storage-provisioner streamnative/local-storage-provisioner
-  WC=$(${KUBECTL} get pods --field-selector=status.phase=Running | grep local-storage-provisioner | wc -l)
-  while [[ ${WC} -lt 1 ]]; do
-    echo ${WC};
-    sleep 15
-    ${KUBECTL} get pods --field-selector=status.phase=Running
-    WC=$(${KUBECTL} get pods --field-selector=status.phase=Running | grep local-storage-provisioner | wc -l)
-  done
-  echo "Successfully installed the local storage provisioner."
-}
function ci::install_cert_manager() {
  echo "Installing the cert-manager ..."
  ${KUBECTL} create namespace cert-manager
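Background for this removal (an observation, not part of the diff): the kind releases targeted here ship a default "standard" StorageClass backed by the local-path provisioner, so the chart's PVCs bind without a separately installed provisioner. A quick check on a freshly created kind cluster, assuming kubectl points at it:

# the default StorageClass and its provisioner pods come with kind itself
kubectl get storageclass
kubectl get pods -n local-path-storage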

.gitignore
View File

@@ -16,3 +16,6 @@ charts/**/*.lock
PRIVATEKEY
PUBLICKEY
+.vagrant/
+pulsarctl-*-*.tar.gz
+pulsarctl-*-*/

View File

@@ -72,9 +72,9 @@ It includes support for:
In order to use this chart to deploy Apache Pulsar on Kubernetes, the followings are required.
-1. kubectl 1.14 or higher, compatible with your cluster ([+/- 1 minor release from your cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
+1. kubectl 1.18 or higher, compatible with your cluster ([+/- 1 minor release from your cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
2. Helm v3 (3.0.2 or higher)
-3. A Kubernetes cluster, version 1.14 or higher.
+3. A Kubernetes cluster, version 1.18 or higher.
## Environment setup
@@ -100,7 +100,7 @@ helm install <release-name> apache/pulsar
## Kubernetes cluster preparation
-You need a Kubernetes cluster whose version is 1.14 or higher in order to use this chart, due to the usage of certain Kubernetes features.
+You need a Kubernetes cluster whose version is 1.18 or higher in order to use this chart, due to the usage of certain Kubernetes features.
We provide some instructions to guide you through the preparation: http://pulsar.apache.org/docs/en/helm-prepare/
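A small sanity-check sketch for the bumped prerequisites (assumes kubectl and helm are already on PATH; these are the tools' standard flags, not chart-specific commands):

# client and server should both report v1.18 or newer, helm a 3.x release
kubectl version --short
helm version --short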

Vagrantfile (new file)
View File

@@ -0,0 +1,34 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+# vagrant configuration file for setting up local environment for Pulsar Helm Chart
+# CI script development.
+#
+# usage:
+# Starting vagrant box:
+#   vagrant up
+# Connecting to vagrant box and running a ci script:
+#   vagrant ssh
+#   byobu
+#   cd /vagrant
+#   .ci/chart_test.sh .ci/clusters/values-local-pv.yaml
+# Shutting down vagrant box:
+#   vagrant halt
+# Destroying vagrant box:
+#   vagrant destroy
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/focal64"
+  config.vm.provider "virtualbox" do |vb|
+    vb.memory = "7168"
+    vb.cpus = 2
+  end
+  config.vm.provision "shell", inline: <<-SHELL
+    export DEBIAN_FRONTEND=noninteractive
+    sudo apt-get update
+    sudo apt-get -y install docker.io
+    sudo adduser vagrant docker
+    echo 'PATH="/vagrant/output/bin:$PATH"' >> /home/vagrant/.profile
+  SHELL
+end

View File

@@ -17,7 +17,7 @@
# under the License.
#
-apiVersion: v1
+apiVersion: v2
appVersion: "2.7.4"
description: Apache Pulsar Helm chart for Kubernetes
name: pulsar

View File

@@ -25,14 +25,15 @@ fi
OUTPUT=${PULSAR_CHART_HOME}/output
OUTPUT_BIN=${OUTPUT}/bin
-KUBECTL_VERSION=1.14.3
+KUBECTL_VERSION=1.18.20
KUBECTL_BIN=$OUTPUT_BIN/kubectl
HELM_BIN=$OUTPUT_BIN/helm
-HELM_VERSION=3.0.1
+HELM_VERSION=3.7.2
-KIND_VERSION=0.6.1
+KIND_VERSION=0.11.1
KIND_BIN=$OUTPUT_BIN/kind
CR_BIN=$OUTPUT_BIN/cr
-CR_VERSION=1.0.0-beta.1
+CR_VERSION=1.3.0
+export PATH="$OUTPUT_BIN:$PATH"
test -d "$OUTPUT_BIN" || mkdir -p "$OUTPUT_BIN"
@@ -65,7 +66,7 @@ function hack::ensure_kubectl() {
  echo "Installing kubectl v$KUBECTL_VERSION..."
  tmpfile=$(mktemp)
  trap "test -f $tmpfile && rm $tmpfile" RETURN
-  curl --retry 10 -L -o $tmpfile https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl
+  curl --retry 10 -L -o $tmpfile https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl
  mv $tmpfile $KUBECTL_BIN
  chmod +x $KUBECTL_BIN
}
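The companion hack::ensure_helm is not shown in this diff; the sketch below only illustrates how the bumped HELM_VERSION would typically be consumed via the public get.helm.sh release URL. The function body and flags are assumptions, not the repository's verified code:

function hack::ensure_helm() {
  # reuse an already-downloaded binary when it matches the pinned version
  if [[ -x "$HELM_BIN" ]] && "$HELM_BIN" version --short | grep -q "v${HELM_VERSION}"; then
    return 0
  fi
  echo "Installing helm v$HELM_VERSION..."
  tmpfile=$(mktemp)
  trap "test -f $tmpfile && rm $tmpfile" RETURN
  # official Helm tarballs are published per OS/arch at get.helm.sh
  curl --retry 10 -L -o $tmpfile https://get.helm.sh/helm-v${HELM_VERSION}-${OS}-${ARCH}.tar.gz
  tar -xzf $tmpfile -C $OUTPUT_BIN --strip-components=1 ${OS}-${ARCH}/helm
  chmod +x $HELM_BIN
}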

View File

@@ -25,6 +25,7 @@ source ${PULSAR_CHART_HOME}/hack/common.sh
hack::ensure_kubectl
hack::ensure_helm
+hack::ensure_kind
usage() {
  cat <<EOF
@@ -82,7 +83,7 @@ done
clusterName=${clusterName:-pulsar-dev}
nodeNum=${nodeNum:-6}
-k8sVersion=${k8sVersion:-v1.14.10}
+k8sVersion=${k8sVersion:-v1.18.19}
volumeNum=${volumeNum:-9}
echo "clusterName: ${clusterName}"
@@ -229,13 +230,6 @@ spec:
EOF
$KUBECTL_BIN apply -f ${registryFile}
-echo "init pulsar env"
-$KUBECTL_BIN apply -f ${PULSAR_CHART_HOME}/manifests/local-dind/local-volume-provisioner.yaml
-docker pull gcr.io/google-containers/kube-scheduler:${k8sVersion}
-docker tag gcr.io/google-containers/kube-scheduler:${k8sVersion} mirantis/hypokube:final
-kind load docker-image --name=${clusterName} mirantis/hypokube:final
echo "############# success create cluster:[${clusterName}] #############"
echo "To start using your cluster, run:"
@@ -248,4 +242,4 @@ because http proxy is inaccessible.
If you cannot remove http proxy settings, you can either whitelist image
domains in NO_PROXY environment or use 'docker pull <image> && kind load
docker-image <image>' command to load images into nodes.
EOF
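For reference, a hedged sketch of how the bumped default maps onto kind's published node images (the cluster name and flags here are illustrative, not the script's exact invocation):

# kindest/node:v1.18.19 is one of the node images released with kind v0.11.1
kind create cluster --name pulsar-dev --image kindest/node:v1.18.19
# kind names the kubeconfig context kind-<cluster-name>
kubectl cluster-info --context kind-pulsar-dev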

View File

@@ -21,11 +21,11 @@
NAMESPACE=cert-manager
NAME=cert-manager
-VERSION=v0.13.0
+VERSION=v1.1.0
# Install cert-manager CustomResourceDefinition resources
echo "Installing cert-manager CRD resources ..."
-kubectl apply --validate=false -f https://raw.githubusercontent.com/jetstack/cert-manager/${VERSION}/deploy/manifests/00-crds.yaml
+kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/${VERSION}/cert-manager.crds.yaml
# Create the namespace
kubectl get ns ${NAMESPACE}
@@ -52,4 +52,4 @@ helm install \
  --version ${VERSION} \
  ${NAME} \
  jetstack/cert-manager
echo "Successfully installed cert-manager ${VERSION}."