diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f9811dee8..409bc3775 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,5 +12,7 @@ jobs: with: go-version: '^1.20' - uses: actions/checkout@v3 + - name: Basic tests + run: make check - name: envtest - run: ENVTEST_K8S_VERSION=1.22 make check + run: ENVTEST_K8S_VERSION=1.22 make check-envtest diff --git a/.golangci.yaml b/.golangci.yaml index c8ab4653b..9891a55f5 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -7,8 +7,8 @@ linters: - gci - gofumpt - scopelint - - depguard enable: + - depguard - gomodguard - gosimple - importas @@ -28,21 +28,24 @@ linters-settings: depguard: rules: - main: - allow: - - $gostd - - github.com/OpenPeeDeeP - include-go-root: true - packages-with-error-message: - - io/ioutil: > - Use the "io" and "os" packages instead. - See https://go.dev/doc/go1.16#ioutil + everything: + deny: + - pkg: io/ioutil + desc: > + Use the "io" and "os" packages instead. + See https://go.dev/doc/go1.16#ioutil - - net/http/httptest: Should be used only in tests. - - testing/*: The "testing" packages should be used only in tests. + not-tests: + files: ['!$test'] + deny: + - pkg: net/http/httptest + desc: Should be used only in tests. - - github.com/percona/percona-postgresql-operator/internal/testing/*: > - The "internal/testing" packages should be used only in tests. + - pkg: testing/* + desc: The "testing" packages should be used only in tests. + + - pkg: github.com/percona/percona-postgresql-operator/internal/testing/* + desc: The "internal/testing" packages should be used only in tests. exhaustive: default-signifies-exhaustive: true @@ -71,14 +74,6 @@ linters-settings: alias: k8serrors no-unaliased: true -issues: - exclude-rules: - # These testing packages are allowed in test files. The packages are - # disallowed everywhere then ignored here because that is how depguard works. 
- - linters: [depguard] - path: _test[.]go$ - text: \`(net/http/httptest|[^`]*testing[^`]*)` - run: build-tags: - envtest diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 180ffe756..079328f63 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -54,4 +54,4 @@ See the [Documentation Contribution Guide](https://github.com/percona/k8spg-docs ## Code review -Your contribution will be reviewed by other developers contributing to the project. The more complex your changes are, the more experts will be involved. You will receive feedback and recommendations directly on your pull request on GitHub, so keep an eye on your submission and be prepared to make further amendments. The developers might even provide some concrete suggestions on modifying your code to match the project’s expectations better. \ No newline at end of file +Your contribution will be reviewed by other developers contributing to the project. The more complex your changes are, the more experts will be involved. You will receive feedback and recommendations directly on your pull request on GitHub, so keep an eye on your submission and be prepared to make further amendments. The developers might even provide some concrete suggestions on modifying your code to match the project’s expectations better. 
diff --git a/Makefile b/Makefile index 3334aa92f..67753524b 100644 --- a/Makefile +++ b/Makefile @@ -1,322 +1,293 @@ +PGO_IMAGE_NAME ?= percona-postgresql-operator +PGO_IMAGE_MAINTAINER ?= Percona +PGO_IMAGE_SUMMARY ?= Percona PostgreSQL Operator +PGO_IMAGE_DESCRIPTION ?= $(PGO_IMAGE_SUMMARY) +PGO_IMAGE_URL ?= https://github.com/percona/percona-postgresql-operator +PGO_IMAGE_PREFIX ?= localhost -# Default values if not already set -NAME ?= percona-postgresql-operator -VERSION ?= $(shell git rev-parse --abbrev-ref HEAD | sed -e 's^/^-^g; s^[.]^-^g;' | tr '[:upper:]' '[:lower:]') -ROOT_REPO ?= ${PWD} -IMAGE_TAG_BASE ?= perconalab/$(NAME) -IMAGE ?= $(IMAGE_TAG_BASE):$(VERSION) +PGMONITOR_DIR ?= hack/tools/pgmonitor +PGMONITOR_VERSION ?= v4.8.1 +QUERIES_CONFIG_DIR ?= hack/tools/queries -PGOROOT ?= $(CURDIR) -PGO_BASEOS ?= ubi8 -PGO_IMAGE_PREFIX ?= crunchydata -PGO_IMAGE_TAG ?= $(PGO_BASEOS)-$(PGO_VERSION) -PGO_VERSION ?= $(shell git describe --tags) -PGO_PG_VERSION ?= 14 -PGO_PG_FULLVERSION ?= 14.5 -PGO_KUBE_CLIENT ?= kubectl - -RELTMPDIR=/tmp/release.$(PGO_VERSION) -RELFILE=/tmp/postgres-operator.$(PGO_VERSION).tar.gz - -# Valid values: buildah (default), docker -IMGBUILDER ?= buildah -# Determines whether or not rootless builds are enabled -IMG_ROOTLESS_BUILD ?= false -# The utility to use when pushing/pulling to and from an image repo (e.g. docker or buildah) -IMG_PUSHER_PULLER ?= docker -# Determines whether or not images should be pushed to the local docker daemon when building with -# a tool other than docker (e.g. 
when building with buildah) -IMG_PUSH_TO_DOCKER_DAEMON ?= true -# Defines the sudo command that should be prepended to various build commands when rootless builds are -# not enabled -IMGCMDSUDO= -ifneq ("$(IMG_ROOTLESS_BUILD)", "true") - IMGCMDSUDO=sudo --preserve-env -endif -IMGCMDSTEM=$(IMGCMDSUDO) buildah bud --layers $(SQUASH) - -# Default the buildah format to docker to ensure it is possible to pull the images from a docker -# repository using docker (otherwise the images may not be recognized) -export BUILDAH_FORMAT ?= docker - -# Allows simplification of IMGBUILDER switching -ifeq ("$(IMGBUILDER)","docker") - IMGCMDSTEM=docker build -endif - -# set the proper packager, registry and base image based on the PGO_BASEOS configured -DOCKERBASEREGISTRY= -BASE_IMAGE_OS= -ifeq ("$(PGO_BASEOS)", "ubi8") - BASE_IMAGE_OS=ubi8-minimal - DOCKERBASEREGISTRY=registry.access.redhat.com/ - PACKAGER=microdnf -endif +# Buildah's "build" used to be "bud". Use the alias to be compatible for a while. +BUILDAH_BUILD ?= buildah bud DEBUG_BUILD ?= false GO ?= go GO_BUILD = $(GO_CMD) build -trimpath GO_CMD = $(GO_ENV) $(GO) GO_TEST ?= $(GO) test -KUTTL_TEST ?= kuttl test +KUTTL ?= kubectl-kuttl +KUTTL_TEST ?= $(KUTTL) test # Disable optimizations if creating a debug build ifeq ("$(DEBUG_BUILD)", "true") GO_BUILD = $(GO_CMD) build -gcflags='all=-N -l' endif -# To build a specific image, run 'make -image' (e.g. 'make postgres-operator-image') -images = postgres-operator \ - crunchy-postgres-exporter - -.PHONY: all setup clean push pull release deploy - - -#======= Main functions ======= +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-formatting the target and help. 
Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +.PHONY: all +all: ## Build all images all: build-docker-image -setup: - PGOROOT='$(PGOROOT)' ./bin/get-deps.sh - ./bin/check-deps.sh +.PHONY: setup +setup: ## Run Setup needed to build images +setup: get-pgmonitor + +.PHONY: get-pgmonitor +get-pgmonitor: + git -C '$(dir $(PGMONITOR_DIR))' clone https://github.com/CrunchyData/pgmonitor.git || git -C '$(PGMONITOR_DIR)' fetch origin + @git -C '$(PGMONITOR_DIR)' checkout '$(PGMONITOR_VERSION)' + @git -C '$(PGMONITOR_DIR)' config pull.ff only + [ -d '${QUERIES_CONFIG_DIR}' ] || mkdir -p '${QUERIES_CONFIG_DIR}' + cp -r '$(PGMONITOR_DIR)/postgres_exporter/common/.' '${QUERIES_CONFIG_DIR}' + cp '$(PGMONITOR_DIR)/postgres_exporter/linux/queries_backrest.yml' '${QUERIES_CONFIG_DIR}' + +.PHONY: clean +clean: ## Clean resources +clean: clean-deprecated + rm -f bin/postgres-operator + rm -f config/rbac/role.yaml + [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated + [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other + rm -rf build/crd/generated build/crd/*/generated + [ ! -f hack/tools/setup-envtest ] || hack/tools/setup-envtest --bin-dir=hack/tools/envtest cleanup + [ ! -f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest + [ ! -d hack/tools/envtest ] || rm -r hack/tools/envtest + [ ! -d hack/tools/pgmonitor ] || rm -rf hack/tools/pgmonitor + [ ! 
-n "$$(ls hack/tools)" ] || rm -r hack/tools/* + [ ! -d hack/.kube ] || rm -r hack/.kube + +.PHONY: clean-deprecated +clean-deprecated: ## Clean deprecated resources + @# packages used to be downloaded into the vendor directory + [ ! -d vendor ] || rm -r vendor + @# executables used to be compiled into the $GOBIN directory + [ ! -n '$(GOBIN)' ] || rm -f $(GOBIN)/postgres-operator $(GOBIN)/apiserver $(GOBIN)/*pgo + @# executables used to be in subdirectories + [ ! -d bin/pgo-rmdata ] || rm -r bin/pgo-rmdata + [ ! -d bin/pgo-backrest ] || rm -r bin/pgo-backrest + [ ! -d bin/pgo-scheduler ] || rm -r bin/pgo-scheduler + [ ! -d bin/postgres-operator ] || rm -r bin/postgres-operator + @# keys used to be generated before install + [ ! -d conf/pgo-backrest-repo ] || rm -r conf/pgo-backrest-repo + [ ! -d conf/postgres-operator ] || rm -r conf/postgres-operator + @# crunchy-postgres-exporter used to live in this repo + [ ! -d bin/crunchy-postgres-exporter ] || rm -r bin/crunchy-postgres-exporter + [ ! 
-d build/crunchy-postgres-exporter ] || rm -r build/crunchy-postgres-exporter -#=== postgrescluster CRD === -# Create operator and target namespaces -createnamespaces: - $(PGO_KUBE_CLIENT) apply -k ./config/namespace +##@ Deployment +.PHONY: createnamespaces +createnamespaces: ## Create operator and target namespaces + kubectl apply -k ./config/namespace -# Delete operator and target namespaces -deletenamespaces: - $(PGO_KUBE_CLIENT) delete -k ./config/namespace +.PHONY: deletenamespaces +deletenamespaces: ## Delete operator and target namespaces + kubectl delete -k ./config/namespace -# Install the postgrescluster CRD -install: - $(PGO_KUBE_CLIENT) apply --server-side -f ./deploy/crd.yaml - $(PGO_KUBE_CLIENT) apply --server-side -f ./deploy/rbac.yaml +.PHONY: install +install: ## Install the postgrescluster CRD + kubectl apply --server-side -f ./deploy/crd.yaml + kubectl apply --server-side -f ./deploy/rbac.yaml -# Delete the postgrescluster CRD -uninstall: - $(PGO_KUBE_CLIENT) delete -f ./deploy/crd.yaml - $(PGO_KUBE_CLIENT) delete -f ./deploy/rbac.yaml +.PHONY: uninstall +uninstall: ## Delete the postgrescluster CRD + kubectl delete -f ./deploy/crd.yaml + kubectl delete -f ./deploy/rbac.yaml -# Deploy the PostgreSQL Operator (enables the postgrescluster controller) -deploy: +.PHONY: deploy +deploy: ## Deploy the PostgreSQL Operator (enables the postgrescluster controller) yq eval '(.spec.template.spec.containers[] | select(.name=="operator")).image = "$(IMAGE)"' ./deploy/operator.yaml \ - | $(PGO_KUBE_CLIENT) apply -f - - -# Deploy the PostgreSQL Operator locally -deploy-dev: build-postgres-operator createnamespaces - $(PGO_KUBE_CLIENT) apply --server-side -k ./config/dev + | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy the PostgreSQL Operator + kubectl delete -f ./deploy/operator.yaml + +.PHONY: deploy-dev +deploy-dev: ## Deploy the PostgreSQL Operator locally +deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true" +deploy-dev: get-pgmonitor 
+deploy-dev: build-postgres-operator +deploy-dev: createnamespaces + kubectl apply --server-side -k ./config/dev hack/create-kubeconfig.sh postgres-operator pgo env \ + QUERIES_CONFIG_DIR="${QUERIES_CONFIG_DIR}" \ CRUNCHY_DEBUG=true \ - CHECK_FOR_UPGRADES=false \ + PGO_FEATURE_GATES="${PGO_FEATURE_GATES}" \ + CHECK_FOR_UPGRADES='$(if $(CHECK_FOR_UPGRADES),$(CHECK_FOR_UPGRADES),false)' \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ - $(shell $(PGO_KUBE_CLIENT) kustomize ./config/dev | \ + PGO_NAMESPACE='postgres-operator' \ + $(shell $(KUSTOMIZE) build ./config/dev | \ sed -ne '/^kind: Deployment/,/^---/ { \ /RELATED_IMAGE_/ { N; s,.*\(RELATED_[^[:space:]]*\).*value:[[:space:]]*\([^[:space:]]*\),\1="\2",; p; }; \ }') \ $(foreach v,$(filter RELATED_IMAGE_%,$(.VARIABLES)),$(v)="$($(v))") \ bin/postgres-operator -# Undeploy the PostgreSQL Operator -undeploy: - $(PGO_KUBE_CLIENT) delete -f ./deploy/operator.yaml - - -run-local: - ./bin/postgres-operator - - -#======= Binary builds ======= -build-postgres-operator: - $(GO_BUILD) -ldflags '-X "main.versionString=$(VERSION)"' \ +##@ Build - Binary +.PHONY: build-postgres-operator +build-postgres-operator: ## Build the postgres-operator binary + $(GO_BUILD) -ldflags '-X "main.versionString=$(PGO_VERSION)"' \ -o bin/postgres-operator ./cmd/postgres-operator -build-pgo-%: - $(info No binary build needed for $@) - -build-crunchy-postgres-exporter: - $(info No binary build needed for $@) - - -#======= Image builds ======= -build-docker-image: - ROOT_REPO=$(ROOT_REPO) VERSION=$(VERSION) IMAGE=$(IMAGE) $(ROOT_REPO)/e2e-tests/build - -$(PGOROOT)/build/%/Dockerfile: - $(error No Dockerfile found for $* naming pattern: [$@]) - -crunchy-postgres-exporter-img-build: pgo-base-$(IMGBUILDER) build-crunchy-postgres-exporter $(PGOROOT)/build/crunchy-postgres-exporter/Dockerfile - $(IMGCMDSTEM) \ - -f $(PGOROOT)/build/crunchy-postgres-exporter/Dockerfile \ - -t $(PGO_IMAGE_PREFIX)/crunchy-postgres-exporter:$(PGO_IMAGE_TAG) \ - --build-arg 
BASEOS=$(PGO_BASEOS) \ - --build-arg BASEVER=$(PGO_VERSION) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - --build-arg PREFIX=$(PGO_IMAGE_PREFIX) \ - $(PGOROOT) - -postgres-operator-img-build: build-postgres-operator $(PGOROOT)/build/postgres-operator/Dockerfile - $(IMGCMDSTEM) \ - -f $(PGOROOT)/build/postgres-operator/Dockerfile \ - -t $(PGO_IMAGE_PREFIX)/postgres-operator:$(PGO_IMAGE_TAG) \ - --build-arg BASE_IMAGE_OS=$(BASE_IMAGE_OS) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - --build-arg RELVER=$(PGO_VERSION) \ - --build-arg DOCKERBASEREGISTRY=$(DOCKERBASEREGISTRY) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PG_FULL=$(PGO_PG_FULLVERSION) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - $(PGOROOT) - -%-img-buildah: %-img-build ; -# only push to docker daemon if variable PGO_PUSH_TO_DOCKER_DAEMON is set to "true" -ifeq ("$(IMG_PUSH_TO_DOCKER_DAEMON)", "true") - $(IMGCMDSUDO) buildah push $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) docker-daemon:$(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) -endif - -%-img-docker: %-img-build ; - -%-image: %-img-$(IMGBUILDER) ; - -pgo-base: pgo-base-$(IMGBUILDER) - -pgo-base-build: $(PGOROOT)/build/pgo-base/Dockerfile licenses - $(IMGCMDSTEM) \ - -f $(PGOROOT)/build/pgo-base/Dockerfile \ - -t $(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) \ - --build-arg BASE_IMAGE_OS=$(BASE_IMAGE_OS) \ - --build-arg BASEOS=$(PGO_BASEOS) \ - --build-arg RELVER=$(PGO_VERSION) \ - --build-arg DOCKERBASEREGISTRY=$(DOCKERBASEREGISTRY) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PG_FULL=$(PGO_PG_FULLVERSION) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - $(PGOROOT) - -pgo-base-buildah: pgo-base-build ; -# only push to docker daemon if variable PGO_PUSH_TO_DOCKER_DAEMON is set to "true" -ifeq ("$(IMG_PUSH_TO_DOCKER_DAEMON)", "true") - $(IMGCMDSUDO) buildah push $(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) docker-daemon:$(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) 
-endif - -pgo-base-docker: pgo-base-build - -#======== Utility ======= -ENVTEST = $(shell pwd)/bin/setup-envtest -envtest: ## Download envtest-setup locally if necessary. - $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) - +##@ Build - Images +.PHONY: build-postgres-operator-image +build-postgres-operator-image: ## Build the postgres-operator image +build-postgres-operator-image: PGO_IMAGE_REVISION := $(shell git rev-parse HEAD) +build-postgres-operator-image: PGO_IMAGE_TIMESTAMP := $(shell date -u +%FT%TZ) +build-postgres-operator-image: build-postgres-operator +build-postgres-operator-image: build/postgres-operator/Dockerfile + $(if $(shell (echo 'buildah version 1.24'; $(word 1,$(BUILDAH_BUILD)) --version) | sort -Vc 2>&1), \ + $(warning WARNING: old buildah does not invalidate its cache for changed labels: \ + https://github.com/containers/buildah/issues/3517)) + $(if $(IMAGE_TAG),, $(error missing IMAGE_TAG)) + $(strip $(BUILDAH_BUILD)) \ + --tag $(BUILDAH_TRANSPORT)$(PGO_IMAGE_PREFIX)/$(PGO_IMAGE_NAME):$(IMAGE_TAG) \ + --label name='$(PGO_IMAGE_NAME)' \ + --label build-date='$(PGO_IMAGE_TIMESTAMP)' \ + --label description='$(PGO_IMAGE_DESCRIPTION)' \ + --label maintainer='$(PGO_IMAGE_MAINTAINER)' \ + --label summary='$(PGO_IMAGE_SUMMARY)' \ + --label url='$(PGO_IMAGE_URL)' \ + --label vcs-ref='$(PGO_IMAGE_REVISION)' \ + --label vendor='$(PGO_IMAGE_MAINTAINER)' \ + --label io.k8s.display-name='$(PGO_IMAGE_NAME)' \ + --label io.k8s.description='$(PGO_IMAGE_DESCRIPTION)' \ + --label io.openshift.tags="postgresql,postgres,sql,nosql,crunchy" \ + --annotation org.opencontainers.image.authors='$(PGO_IMAGE_MAINTAINER)' \ + --annotation org.opencontainers.image.vendor='$(PGO_IMAGE_MAINTAINER)' \ + --annotation org.opencontainers.image.created='$(PGO_IMAGE_TIMESTAMP)' \ + --annotation org.opencontainers.image.description='$(PGO_IMAGE_DESCRIPTION)' \ + --annotation org.opencontainers.image.revision='$(PGO_IMAGE_REVISION)' \ + 
--annotation org.opencontainers.image.title='$(PGO_IMAGE_SUMMARY)' \ + --annotation org.opencontainers.image.url='$(PGO_IMAGE_URL)' \ + $(if $(PGO_VERSION),$(strip \ + --label release='$(PGO_VERSION)' \ + --label version='$(PGO_VERSION)' \ + --annotation org.opencontainers.image.version='$(PGO_VERSION)' \ + )) \ + --file $< --format docker --layers . + +##@ Test .PHONY: check -check: envtest - PGO_NAMESPACE="postgres-operator" KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" $(GO_TEST) -coverprofile cover.out ./... +check: ## Run basic go tests with coverage output + $(GO_TEST) -cover ./... # Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' # - KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true .PHONY: check-envtest +check-envtest: ## Run check using envtest and a mock kube api check-envtest: ENVTEST_USE = hack/tools/setup-envtest --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) check-envtest: SHELL = bash -check-envtest: +check-envtest: get-pgmonitor GOBIN='$(CURDIR)/hack/tools' $(GO) install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest @$(ENVTEST_USE) --print=overview && echo - source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" $(GO_TEST) -count=1 -cover -tags=envtest ./... + source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ + $(GO_TEST) -count=1 -cover -tags=envtest ./... -# - PGO_TEST_TIMEOUT_SCALE=1 +# The "PGO_TEST_TIMEOUT_SCALE" environment variable (default: 1) can be set to a +# positive number that extends test timeouts. 
The following runs tests with +# timeouts that are 20% longer than normal: +# make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing +check-envtest-existing: ## Run check using envtest and an existing kube api +check-envtest-existing: get-pgmonitor check-envtest-existing: createnamespaces - ${PGO_KUBE_CLIENT} apply --server-side -k ./config/dev - USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" $(GO_TEST) -count=1 -cover -p=1 -tags=envtest ./... - ${PGO_KUBE_CLIENT} delete -k ./config/dev + kubectl apply --server-side -k ./config/dev + USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ + $(GO_TEST) -count=1 -cover -p=1 -tags=envtest ./... + kubectl delete -k ./config/dev # Expects operator to be running .PHONY: check-kuttl -check-kuttl: - ${PGO_KUBE_CLIENT} ${KUTTL_TEST} \ +check-kuttl: ## Run kuttl end-to-end tests +check-kuttl: ## example command: make check-kuttl KUTTL_TEST=' + ${KUTTL_TEST} \ --config testing/kuttl/kuttl-test.yaml .PHONY: generate-kuttl -generate-kuttl: export KUTTL_PG_VERSION ?= 14 -generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.1 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:centos8-14.2-0 -generate-kuttl: +generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 14 +generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 15 +generate-kuttl: export KUTTL_PG_VERSION ?= 15 +generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.3 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0 +generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace +generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated [ ! 
-d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other bash -ceu ' \ - render() { envsubst '"'"'$$KUTTL_PG_VERSION $$KUTTL_POSTGIS_VERSION $$KUTTL_PSQL_IMAGE'"'"'; }; \ + case $(KUTTL_PG_VERSION) in \ + 15 ) export KUTTL_BITNAMI_IMAGE_TAG=15.0.0-debian-11-r4 ;; \ + 14 ) export KUTTL_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ + 13 ) export KUTTL_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ + 12 ) export KUTTL_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ + 11 ) export KUTTL_BITNAMI_IMAGE_TAG=11.17.0-debian-11-r39 ;; \ + esac; \ + render() { envsubst '"'"' \ + $$KUTTL_PG_UPGRADE_FROM_VERSION $$KUTTL_PG_UPGRADE_TO_VERSION \ + $$KUTTL_PG_VERSION $$KUTTL_POSTGIS_VERSION $$KUTTL_PSQL_IMAGE \ + $$KUTTL_BITNAMI_IMAGE_TAG $$KUTTL_TEST_DELETE_NAMESPACE'"'"'; }; \ while [ $$# -gt 0 ]; do \ source="$${1}" target="$${1/e2e/e2e-generated}"; \ mkdir -p "$${target%/*}"; render < "$${source}" > "$${target}"; \ shift; \ - done' - $(wildcard testing/kuttl/e2e/*/*.yaml) $(wildcard testing/kuttl/e2e-other/*/*.yaml) + done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e-other/*/*.yaml + +##@ Generate .PHONY: check-generate -check-generate: generate-crd generate-deepcopy generate-rbac +check-generate: ## Check crd, crd-docs, deepcopy functions, and rbac generation +check-generate: generate-crd +check-generate: generate-deepcopy +check-generate: generate-rbac git diff --exit-code -- config/crd git diff --exit-code -- config/rbac git diff --exit-code -- pkg/apis -clean: clean-deprecated - rm -f bin/postgres-operator - rm -f config/rbac/role.yaml - [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated - [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other - [ ! -d build/crd/generated ] || rm -r build/crd/generated - [ ! -f hack/tools/setup-envtest ] || hack/tools/setup-envtest --bin-dir=hack/tools/envtest cleanup - [ ! -f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest - [ ! 
-d hack/tools/envtest ] || rm -r hack/tools/envtest - [ ! -n "$$(ls hack/tools)" ] || rm hack/tools/* - [ ! -d hack/.kube ] || rm -r hack/.kube +.PHONY: generate +generate: ## Generate crd, crd-docs, deepcopy functions, and rbac +generate: generate-crd +generate: generate-crd-docs +generate: generate-deepcopy +generate: generate-rbac -clean-deprecated: - @# packages used to be downloaded into the vendor directory - [ ! -d vendor ] || rm -r vendor - @# executables used to be compiled into the $GOBIN directory - [ ! -n '$(GOBIN)' ] || rm -f $(GOBIN)/postgres-operator $(GOBIN)/apiserver $(GOBIN)/*pgo - @# executables used to be in subdirectories - [ ! -d bin/pgo-rmdata ] || rm -r bin/pgo-rmdata - [ ! -d bin/pgo-backrest ] || rm -r bin/pgo-backrest - [ ! -d bin/pgo-scheduler ] || rm -r bin/pgo-scheduler - [ ! -d bin/postgres-operator ] || rm -r bin/postgres-operator - @# keys used to be generated before install - [ ! -d conf/pgo-backrest-repo ] || rm -r conf/pgo-backrest-repo - [ ! -d conf/postgres-operator ] || rm -r conf/postgres-operator - -push: $(images:%=push-%) ; - -push-%: - $(IMG_PUSHER_PULLER) push $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) - -pull: $(images:%=pull-%) ; - -pull-%: - $(IMG_PUSHER_PULLER) pull $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) - -generate: kustomize generate-crd generate-deepcopy generate-rbac generate-manager generate-bundle generate-cw - -generate-crunchy-crd: +.PHONY: generate-crunchy-crd +generate-crunchy-crd: ## Generate crd GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ crd:crdVersions='v1' \ paths='./pkg/apis/postgres-operator.crunchydata.com/...' \ output:dir='build/crd/crunchy/generated' # build/crd/generated/{group}_{plural}.yaml - $(PGO_KUBE_CLIENT) kustomize ./build/crd/crunchy/ > ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml - -generate-percona-crd: + @ GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ crd:crdVersions='v1' \ - paths='./pkg/apis/pgv2.percona.com/...' 
\ - output:dir='build/crd/percona/generated' # build/crd/generated/{group}_{plural}.yaml - $(PGO_KUBE_CLIENT) kustomize ./build/crd/percona/ > ./config/crd/bases/pgv2.percona.com_perconapgclusters.yaml - -generate-crd: generate-crunchy-crd generate-percona-crd - cat ./config/crd/bases/pgv2.percona.com_perconapgclusters.yaml > ./deploy/crd.yaml - echo --- >> ./deploy/crd.yaml - cat ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml >> ./deploy/crd.yaml - -generate-crd-docs: - GOBIN='$(CURDIR)/hack/tools' go install fybrik.io/crdoc@v0.5.2 + paths='./pkg/apis/...' \ + output:dir='build/crd/pgupgrades/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml + @ + $(KUSTOMIZE) build ./build/crd/crunchy/ > ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml + $(KUSTOMIZE) build ./build/crd/pgupgrades > ./config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml + +.PHONY: generate-crd-docs +generate-crd-docs: ## Generate crd-docs + GOBIN='$(CURDIR)/hack/tools' $(GO) install fybrik.io/crdoc@v0.5.2 ./hack/tools/crdoc \ --resources ./config/crd/bases \ --template ./hack/api-template.tmpl \ @@ -327,11 +298,73 @@ generate-deepcopy: object:headerFile='hack/boilerplate.go.txt' \ paths='./pkg/apis/...' -generate-rbac: +.PHONY: generate-rbac +generate-rbac: ## Generate rbac GOBIN='$(CURDIR)/hack/tools' ./hack/generate-rbac.sh \ './...' 'config/rbac/' $(KUSTOMIZE) build ./config/rbac/namespace/ > ./deploy/rbac.yaml + +##@ Release + +.PHONY: license licenses +license: licenses +licenses: ## Aggregate license files + ./bin/license_aggregator.sh ./cmd/... 
+ +.PHONY: release-postgres-operator-image release-postgres-operator-image-labels +release-postgres-operator-image: ## Build the postgres-operator image and all its prerequisites +release-postgres-operator-image: release-postgres-operator-image-labels +release-postgres-operator-image: licenses +release-postgres-operator-image: build-postgres-operator-image +release-postgres-operator-image-labels: + $(if $(PGO_IMAGE_DESCRIPTION),, $(error missing PGO_IMAGE_DESCRIPTION)) + $(if $(PGO_IMAGE_MAINTAINER),, $(error missing PGO_IMAGE_MAINTAINER)) + $(if $(PGO_IMAGE_NAME),, $(error missing PGO_IMAGE_NAME)) + $(if $(PGO_IMAGE_SUMMARY),, $(error missing PGO_IMAGE_SUMMARY)) + $(if $(PGO_VERSION),, $(error missing PGO_VERSION)) + +##@ Percona + +# Default values if not already set +NAME ?= percona-postgresql-operator +VERSION ?= $(shell git rev-parse --abbrev-ref HEAD | sed -e 's^/^-^g; s^[.]^-^g;' | tr '[:upper:]' '[:lower:]') +ROOT_REPO ?= ${PWD} +IMAGE_TAG_BASE ?= perconalab/$(NAME) +IMAGE ?= $(IMAGE_TAG_BASE):$(VERSION) +PGO_VERSION ?= $(shell git describe --tags) + +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: ## Download kustomize locally if necessary. + $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v4@v4.5.3) + +# Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' +# - ENVTEST_K8S_VERSION=1.19.2 +hack/tools/envtest: SHELL = bash +hack/tools/envtest: + source '$(shell $(GO) list -f '{{ .Dir }}' -m 'sigs.k8s.io/controller-runtime')/hack/setup-envtest.sh' && fetch_envtest_tools $@ + +ENVTEST = $(shell pwd)/bin/setup-envtest +envtest: ## Download envtest-setup locally if necessary. 
+ $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) + +build-docker-image: + ROOT_REPO=$(ROOT_REPO) VERSION=$(VERSION) IMAGE=$(IMAGE) $(ROOT_REPO)/e2e-tests/build + +generate: kustomize generate-crd generate-deepcopy generate-rbac generate-manager generate-bundle generate-cw + +generate-crd: generate-crunchy-crd generate-percona-crd + cat ./config/crd/bases/pgv2.percona.com_perconapgclusters.yaml > ./deploy/crd.yaml + echo --- >> ./deploy/crd.yaml + cat ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml >> ./deploy/crd.yaml + +generate-percona-crd: + GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ + crd:crdVersions='v1' \ + paths='./pkg/apis/pgv2.percona.com/...' \ + output:dir='build/crd/percona/generated' # build/crd/generated/{group}_{plural}.yaml + $(KUSTOMIZE) build ./build/crd/percona/ > ./config/crd/bases/pgv2.percona.com_perconapgclusters.yaml + generate-manager: cd ./config/manager/namespace/ && $(KUSTOMIZE) edit set image postgres-operator=$(IMAGE) $(KUSTOMIZE) build ./config/manager/namespace/ > ./deploy/operator.yaml @@ -364,20 +397,8 @@ generate-versionservice-client: swagger rm -rf $(VERSION_SERVICE_CLIENT_PATH)/client $(SWAGGER) generate client -f $(VERSION_SERVICE_CLIENT_PATH)/version.swagger.yaml -c $(VERSION_SERVICE_CLIENT_PATH)/client -m $(VERSION_SERVICE_CLIENT_PATH)/client/models -# Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' -# - ENVTEST_K8S_VERSION=1.19.2 -hack/tools/envtest: SHELL = bash -hack/tools/envtest: - source '$(shell $(GO) list -f '{{ .Dir }}' -m 'sigs.k8s.io/controller-runtime')/hack/setup-envtest.sh' && fetch_envtest_tools $@ - -.PHONY: license licenses -license: licenses -licenses: - ./bin/license_aggregator.sh ./cmd/... - -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: ## Download kustomize locally if necessary. 
- $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v4@v4.5.3) +run-local: + ./bin/postgres-operator # go-get-tool will 'go get' any package $2 and install it to $1. PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) @@ -392,3 +413,4 @@ GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ rm -rf $$TMP_DIR ;\ } endef + diff --git a/bin/crunchy-postgres-exporter/.gitignore b/bin/crunchy-postgres-exporter/.gitignore deleted file mode 100644 index bd718d9cd..000000000 --- a/bin/crunchy-postgres-exporter/.gitignore +++ /dev/null @@ -1 +0,0 @@ -collectserver diff --git a/bin/crunchy-postgres-exporter/common_lib.sh b/bin/crunchy-postgres-exporter/common_lib.sh deleted file mode 100755 index 720acb446..000000000 --- a/bin/crunchy-postgres-exporter/common_lib.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -RED="\033[0;31m" -GREEN="\033[0;32m" -YELLOW="\033[0;33m" -RESET="\033[0m" - -function enable_debugging() { - if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] - then - echo_info "Turning debugging on.." - export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' - set -x - fi -} - -function env_check_err() { - if [[ -z ${!1} ]] - then - echo_err "$1 environment variable is not set, aborting." 
- exit 1 - fi -} - -function echo_err() { - echo -e "${RED?}$(date) ERROR: ${1?}${RESET?}" -} - -function echo_info() { - echo -e "${GREEN?}$(date) INFO: ${1?}${RESET?}" -} - -function echo_warn() { - echo -e "${YELLOW?}$(date) WARN: ${1?}${RESET?}" -} diff --git a/bin/crunchy-postgres-exporter/start.sh b/bin/crunchy-postgres-exporter/start.sh deleted file mode 100755 index f5992915e..000000000 --- a/bin/crunchy-postgres-exporter/start.sh +++ /dev/null @@ -1,273 +0,0 @@ -#!/bin/bash - -# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source /opt/cpm/bin/common_lib.sh -enable_debugging - -export PG_EXP_HOME=$(find /opt/cpm/bin/ -type d -name 'postgres_exporter*') -export PG_DIR=$(find /usr/ -type d -name 'pgsql-*') -POSTGRES_EXPORTER_PIDFILE=/tmp/postgres_exporter.pid -CONFIG_DIR='/opt/cpm/conf' -QUERIES=( - queries_backrest - queries_global - queries_per_db - queries_nodemx -) - -function trap_sigterm() { - echo_info "Doing trap logic.." - - echo_warn "Clean shutdown of postgres-exporter.." - kill -SIGINT $(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) -} - -# Set default env vars for the postgres exporter container -set_default_postgres_exporter_env() { - if [[ ! -v POSTGRES_EXPORTER_PORT ]] - then - export POSTGRES_EXPORTER_PORT="9187" - default_exporter_env_vars+=("POSTGRES_EXPORTER_PORT=${POSTGRES_EXPORTER_PORT}") - fi -} - -# Set default PG env vars for the exporter container -set_default_pg_exporter_env() { - - if [[ ! 
-v EXPORTER_PG_HOST ]] - then - export EXPORTER_PG_HOST="127.0.0.1" - default_exporter_env_vars+=("EXPORTER_PG_HOST=${EXPORTER_PG_HOST}") - fi - - if [[ ! -v EXPORTER_PG_PORT ]] - then - export EXPORTER_PG_PORT="5432" - default_exporter_env_vars+=("EXPORTER_PG_PORT=${EXPORTER_PG_PORT}") - fi - - if [[ ! -v EXPORTER_PG_DATABASE ]] - then - export EXPORTER_PG_DATABASE="postgres" - default_exporter_env_vars+=("EXPORTER_PG_DATABASE=${EXPORTER_PG_DATABASE}") - fi - - if [[ ! -v EXPORTER_PG_USER ]] - then - export EXPORTER_PG_USER="ccp_monitoring" - default_exporter_env_vars+=("EXPORTER_PG_USER=${EXPORTER_PG_USER}") - fi - - env_check_err "EXPORTER_PG_PASSWORD" -} - -trap 'trap_sigterm' SIGINT SIGTERM - -set_default_postgres_exporter_env -set_default_pg_exporter_env - -if [[ ! ${#default_exporter_env_vars[@]} -eq 0 ]] -then - echo_info "Defaults have been set for the following exporter env vars:" - echo_info "[${default_exporter_env_vars[*]}]" -fi - -# Check that postgres is accepting connections. -echo_info "Waiting for PostgreSQL to be ready.." -while true; do - ${PG_DIR?}/bin/pg_isready -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" - if [ $? -eq 0 ]; then - break - fi - sleep 2 -done - -echo_info "Checking if "${EXPORTER_PG_USER}" is is created.." -while true; do - PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -c "SELECT 1;" "${EXPORTER_PG_DATABASE}" - if [ $? -eq 0 ]; then - break - fi - sleep 2 -done - -if [[ -f /conf/queries.yml ]] -then - echo_info "Custom queries configuration detected.." - QUERY_DIR='/conf' -else - echo_info "No custom queries detected. Applying default configuration.." 
- QUERY_DIR='/tmp' - - touch ${QUERY_DIR?}/queries.yml && > ${QUERY_DIR?}/queries.yml - for query in "${QUERIES[@]}" - do - if [[ -f ${CONFIG_DIR?}/${query?}.yml ]] - then - cat ${CONFIG_DIR?}/${query?}.yml >> /tmp/queries.yml - else - echo_err "Query file ${query?}.yml does not exist (it should).." - exit 1 - fi - done - - VERSION=$(PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -qtAX -c "SELECT current_setting('server_version_num')" "${EXPORTER_PG_DATABASE}") - if (( ${VERSION?} >= 90600 )) && (( ${VERSION?} < 100000 )) - then - if [[ -f ${CONFIG_DIR?}/pg96/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg96/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - elif (( ${VERSION?} >= 100000 )) && (( ${VERSION?} < 110000 )) - then - if [[ -f ${CONFIG_DIR?}/pg10/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg10/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg10/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg10/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - elif (( ${VERSION?} >= 110000 )) && (( ${VERSION?} < 120000 )) - then - if [[ -f ${CONFIG_DIR?}/pg11/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg11/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg11/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg11/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." 
- fi - elif (( ${VERSION?} >= 120000 )) && (( ${VERSION?} < 130000 )) - then - if [[ -f ${CONFIG_DIR?}/pg12/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg12/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg12/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg12/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg12/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg12/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." - fi - elif (( ${VERSION?} >= 130000 )) && (( ${VERSION?} < 140000 )) - then - if [[ -f ${CONFIG_DIR?}/pg13/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg13/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg13/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg13/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg13/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg13/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." 
- fi - elif (( ${VERSION?} >= 140000 )) && (( ${VERSION?} < 150000 )) - then - if [[ -f ${CONFIG_DIR?}/pg14/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg14/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg14/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg14/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg14/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg14/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." - fi - elif (( ${VERSION?} >= 150000 )) - then - if [[ -f ${CONFIG_DIR?}/pg15/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg15/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg15/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg15/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg15/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg15/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." - fi - else - echo_err "Unknown or unsupported version of PostgreSQL. Exiting.." 
- exit 1 - fi -fi - -sed -i \ - -e "s/#PGBACKREST_INFO_THROTTLE_MINUTES#/${PGBACKREST_INFO_THROTTLE_MINUTES:-10}/g" \ - -e "s/#PG_STAT_STATEMENTS_LIMIT#/${PG_STAT_STATEMENTS_LIMIT:-20}/g" \ - -e "s/#PG_STAT_STATEMENTS_THROTTLE_MINUTES#/${PG_STAT_STATEMENTS_THROTTLE_MINUTES:--1}/g" \ - /tmp/queries.yml - -PG_OPTIONS="--extend.query-path=${QUERY_DIR?}/queries.yml --web.listen-address=:${POSTGRES_EXPORTER_PORT}" -if [[ -v WEB_CONFIG_DIR ]]; then - # TODO (jmckulk): define path not dir - PG_OPTIONS+=" --web.config.file=${WEB_CONFIG_DIR}/web-config.yml" -fi - -echo_info "Starting postgres-exporter.." -DATA_SOURCE_URI="${EXPORTER_PG_HOST}:${EXPORTER_PG_PORT}/${EXPORTER_PG_DATABASE}?${EXPORTER_PG_PARAMS}" DATA_SOURCE_USER="${EXPORTER_PG_USER}" DATA_SOURCE_PASS="${EXPORTER_PG_PASSWORD}" ${PG_EXP_HOME?}/postgres_exporter ${PG_OPTIONS?} >>/dev/stdout 2>&1 & -echo $! > $POSTGRES_EXPORTER_PIDFILE - -wait \ No newline at end of file diff --git a/build/crd/.gitignore b/build/crd/.gitignore index 7dcdff02b..348821c83 100644 --- a/build/crd/.gitignore +++ b/build/crd/.gitignore @@ -1,3 +1,4 @@ /generated/ /postgresclusters/generated/ /pgupgrades/generated/ +/crunchy/generated/ diff --git a/build/crd/crunchy/kustomization.yaml b/build/crd/crunchy/kustomization.yaml index 13c0617db..893d3e508 100644 --- a/build/crd/crunchy/kustomization.yaml +++ b/build/crd/crunchy/kustomization.yaml @@ -33,4 +33,4 @@ patchesJson6902: path: "/metadata/labels" value: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 diff --git a/build/crd/kustomization.yaml b/build/crd/kustomization.yaml index 13ef8354f..aaaa069d4 100644 --- a/build/crd/kustomization.yaml +++ b/build/crd/kustomization.yaml @@ -39,4 +39,4 @@ patchesJson6902: path: "/metadata/labels" value: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 diff --git a/build/crd/pgupgrades/kustomization.yaml b/build/crd/pgupgrades/kustomization.yaml index 
ac750fc3b..87e02ed9f 100644 --- a/build/crd/pgupgrades/kustomization.yaml +++ b/build/crd/pgupgrades/kustomization.yaml @@ -5,18 +5,6 @@ resources: - generated/postgres-operator.crunchydata.com_pgupgrades.yaml patches: -# Remove the zero status field included by controller-gen@v0.8.0. These zero -# values conflict with the CRD controller in Kubernetes before v1.22. -# - https://github.com/kubernetes-sigs/controller-tools/pull/630 -# - https://pr.k8s.io/100970 -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com - patch: |- - - op: remove - path: /status - target: group: apiextensions.k8s.io version: v1 @@ -34,4 +22,4 @@ patches: path: "/metadata/labels" value: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.3.0 + app.kubernetes.io/version: 5.4.2 diff --git a/build/crd/postgresclusters/kustomization.yaml b/build/crd/postgresclusters/kustomization.yaml index 676b32d6e..aaaa069d4 100644 --- a/build/crd/postgresclusters/kustomization.yaml +++ b/build/crd/postgresclusters/kustomization.yaml @@ -39,4 +39,4 @@ patchesJson6902: path: "/metadata/labels" value: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.3.0 + app.kubernetes.io/version: 5.4.2 diff --git a/build/crunchy-postgres-exporter/Dockerfile b/build/crunchy-postgres-exporter/Dockerfile deleted file mode 100644 index 364175d8b..000000000 --- a/build/crunchy-postgres-exporter/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal - -ARG BASEOS -ARG PGVERSION - -COPY licenses /licenses - -# Crunchy PostgreSQL repository -COPY conf/*KEY* / -COPY conf/crunchypg${PGVERSION}.repo /etc/yum.repos.d/ -RUN rpm --import ./*GPG-KEY-crunchydata* - -RUN { microdnf -y module disable postgresql || true; } \ - && microdnf -y update \ - && microdnf install -y \ - findutils \ - postgresql${PGVERSION} \ - && microdnf -y clean all - -RUN mkdir -p /opt/cpm/bin /opt/cpm/conf - -# Add will 
extract the exporter into the target directory -ADD hack/tools/postgres_exporter.tar.gz /opt/cpm/bin - -COPY hack/tools/pgmonitor/postgres_exporter/common /opt/cpm/conf -COPY hack/tools/pgmonitor/postgres_exporter/linux /opt/cpm/conf -COPY bin/crunchy-postgres-exporter /opt/cpm/bin - -RUN chgrp -R 0 /opt/cpm/bin /opt/cpm/conf && \ - chmod -R g=u /opt/cpm/bin/ opt/cpm/conf - -# postgres_exporter -EXPOSE 9187 - -# The VOLUME directive must appear after all RUN directives to ensure the proper -# volume permissions are applied when building the image -VOLUME ["/conf"] - -USER 2 - -CMD ["/opt/cpm/bin/start.sh"] diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index f83d421dc..156621631 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -18,6 +18,7 @@ limitations under the License. import ( "context" "os" + "strconv" "strings" "github.com/pkg/errors" @@ -61,6 +62,11 @@ func main() { // Set any supplied feature gates; panic on any unrecognized feature gate err := util.AddAndSetFeatureGates(os.Getenv("PGO_FEATURE_GATES")) assertNoError(err) + // Needed for PMM + err = util.DefaultMutableFeatureGate.SetFromMap(map[string]bool{ + string(util.InstanceSidecars): true, + }) + assertNoError(err) otelFlush, err := initOpenTelemetry() assertNoError(err) @@ -73,6 +79,18 @@ func main() { log := logging.FromContext(ctx) log.V(1).Info("debug flag set to true") + // We are forcing `InstanceSidecars` feature to be enabled. 
+ // It's necessary to get actual feature gate values instead of using + // `PGO_FEATURE_GATES` env var to print logs + var featureGates []string + for k := range util.DefaultMutableFeatureGate.GetAll() { + f := string(k) + "=" + strconv.FormatBool(util.DefaultMutableFeatureGate.Enabled(k)) + featureGates = append(featureGates, f) + } + + log.Info("feature gates enabled", + "PGO_FEATURE_GATES", strings.Join(featureGates, ",")) + cruntime.SetLogger(log) cfg, err := runtime.GetConfig() diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 570069560..16767dbd3 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -2,11 +2,11 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null labels: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.3.0 + app.kubernetes.io/version: 5.4.2 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -140,6 +140,7 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -240,10 +241,12 @@ spec: type: object type: array type: object + x-kubernetes-map-type: atomic type: array required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate @@ -320,6 +323,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. 
The term is applied @@ -376,6 +380,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The @@ -474,6 +479,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -525,6 +531,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The term is applied @@ -625,6 +632,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -681,6 +689,7 @@ spec: The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The @@ -779,6 +788,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -830,6 +840,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term applies to. The term is applied @@ -883,6 +894,7 @@ spec: description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata contains metadata for custom resources @@ -907,6 +919,27 @@ spec: resources: description: Resource requirements for the PGUpgrade container. properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -995,8 +1028,8 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 3be7b3515..6c16f497d 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -6,7 +6,7 @@ metadata: creationTimestamp: null labels: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 46f5289e3..9b9e84421 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,6 +1,3 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - resources: - bases/postgres-operator.crunchydata.com_postgresclusters.yaml - bases/pgv2.percona.com_perconapgclusters.yaml diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 000000000..d3de1e2f3 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,12 @@ +namespace: postgres-operator + +commonLabels: + app.kubernetes.io/name: pgo + # The version below should match the version on the PostgresCluster CRD + app.kubernetes.io/version: 5.4.2 + postgres-operator.crunchydata.com/control-plane: postgres-operator + +bases: +- ../crd +- ../rbac/cluster +- ../manager diff --git a/config/dev/kustomization.yaml b/config/dev/kustomization.yaml new file mode 100644 index 000000000..d837c9067 --- /dev/null +++ b/config/dev/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- ../default + +patches: +- path: manager-dev.yaml diff --git a/config/dev/manager-dev.yaml b/config/dev/manager-dev.yaml new file mode 100644 index 000000000..538a34cf4 --- /dev/null +++ b/config/dev/manager-dev.yaml @@ -0,0 +1,6 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pgo +spec: + replicas: 0 diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 000000000..5c5f0b84c --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 000000000..2b73b10cf --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pgo +spec: + replicas: 1 + strategy: { type: Recreate } + template: + spec: + containers: + - name: operator + image: postgres-operator + env: + - name: PGO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CRUNCHY_DEBUG + value: "true" + - name: RELATED_IMAGE_POSTGRES_14 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0" + - name: RELATED_IMAGE_POSTGRES_14_GIS_3.1 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.1-0" + - name: RELATED_IMAGE_POSTGRES_14_GIS_3.2 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.2-0" + - name: RELATED_IMAGE_POSTGRES_14_GIS_3.3 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.3-0" + - name: RELATED_IMAGE_POSTGRES_15 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0" + - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.4-3.3-0" + - name: RELATED_IMAGE_PGADMIN + value: 
"registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-17" + - name: RELATED_IMAGE_PGBACKREST + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0" + - name: RELATED_IMAGE_PGBOUNCER + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4" + - name: RELATED_IMAGE_PGEXPORTER + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.4.2-0" + - name: RELATED_IMAGE_PGUPGRADE + value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi8-5.4.2-0" + securityContext: + allowPrivilegeEscalation: false + capabilities: { drop: [ALL] } + readOnlyRootFilesystem: true + runAsNonRoot: true + serviceAccountName: pgo diff --git a/config/rbac/cluster/role_binding.yaml b/config/rbac/cluster/role_binding.yaml index 37f15577c..3c16bddc7 100644 --- a/config/rbac/cluster/role_binding.yaml +++ b/config/rbac/cluster/role_binding.yaml @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: percona-postgresql-operator - namespace: pg-operator \ No newline at end of file + namespace: pg-operator diff --git a/config/rbac/cluster/service_account.yaml b/config/rbac/cluster/service_account.yaml index 89bb4a51f..80413d229 100644 --- a/config/rbac/cluster/service_account.yaml +++ b/config/rbac/cluster/service_account.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: percona-postgresql-operator \ No newline at end of file + name: percona-postgresql-operator diff --git a/config/singlenamespace/kustomization.yaml b/config/singlenamespace/kustomization.yaml new file mode 100644 index 000000000..b616d8ed2 --- /dev/null +++ b/config/singlenamespace/kustomization.yaml @@ -0,0 +1,24 @@ +namespace: postgres-operator + +commonLabels: + app.kubernetes.io/name: pgo + # The version below should match the version on the PostgresCluster CRD + app.kubernetes.io/version: 5.4.2 + postgres-operator.crunchydata.com/control-plane: postgres-operator 
+ +bases: +- ../crd +- ../rbac/namespace +- ../manager + +images: +- name: postgres-operator + newName: registry.developers.crunchydata.com/crunchydata/postgres-operator + newTag: ubi8-5.4.2-0 + +#patchesJson6902: +#- target: { group: apps, version: v1, kind: Deployment, name: pgo } +# path: selectors.yaml + +patchesStrategicMerge: +- manager-target.yaml diff --git a/config/singlenamespace/manager-target.yaml b/config/singlenamespace/manager-target.yaml new file mode 100644 index 000000000..949250e26 --- /dev/null +++ b/config/singlenamespace/manager-target.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pgo +spec: + template: + spec: + containers: + - name: operator + env: + - name: PGO_TARGET_NAMESPACE + valueFrom: { fieldRef: { apiVersion: v1, fieldPath: metadata.namespace } } diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index a65832959..53dc5e4e0 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -13736,7 +13736,7 @@ metadata: creationTimestamp: null labels: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 0fb89683b..fa38952c0 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -13736,7 +13736,7 @@ metadata: creationTimestamp: null labels: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index d4b391c36..b12d4f815 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -13736,7 +13736,7 @@ metadata: creationTimestamp: null labels: app.kubernetes.io/name: pgo - app.kubernetes.io/version: 5.2.0 + app.kubernetes.io/version: 5.4.2 name: postgresclusters.postgres-operator.crunchydata.com 
spec: group: postgres-operator.crunchydata.com diff --git a/docs/content/architecture/high-availability.md b/docs/content/architecture/high-availability.md new file mode 100644 index 000000000..f33f61952 --- /dev/null +++ b/docs/content/architecture/high-availability.md @@ -0,0 +1,211 @@ +--- +title: "High Availability" +date: +draft: false +weight: 110 +--- + +One of the great things about PostgreSQL is its reliability: it is very stable +and typically "just works." However, there are certain things that can happen in +the environment that PostgreSQL is deployed in that can affect its uptime, +including: + +- The database storage disk fails or some other hardware failure occurs +- The network on which the database resides becomes unreachable +- The host operating system becomes unstable and crashes +- A key database file becomes corrupted +- A data center is lost + +There may also be downtime events that are due to the normal case of operations, +such as performing a minor upgrade, security patching of operating system, +hardware upgrade, or other maintenance. + +Fortunately, PGO, the Postgres Operator from Crunchy Data, is prepared for this. + +![PostgreSQL Operator high availability Overview](/images/postgresql-ha-overview.png) + +The Crunchy PostgreSQL Operator supports a distributed-consensus based +high availability (HA) system that keeps its managed PostgreSQL clusters up and +running, even if the PostgreSQL Operator disappears. Additionally, it leverages +Kubernetes specific features such as +[Pod Anti-Affinity](#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity) +to limit the surface area that could lead to a PostgreSQL cluster becoming +unavailable. The PostgreSQL Operator also supports automatic healing of failed +primaries and leverages the efficient pgBackRest "delta restore" method, which +eliminates the need to fully reprovision a failed cluster! 
+ +The Crunchy PostgreSQL Operator also maintains high availability during a +routine task such as a PostgreSQL minor version upgrade. + +For workloads that are sensitive to transaction loss, PGO supports PostgreSQL synchronous replication. + +The high availability backing for your PostgreSQL cluster is only as good as +your high availability backing for Kubernetes. To learn more about creating a +[high availability Kubernetes cluster](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/), +please review the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/) +or consult your systems administrator. + +## The Crunchy Postgres Operator High Availability Algorithm + +A critical aspect of any production-grade PostgreSQL deployment is a reliable +and effective high availability (HA) solution. Organizations want to know that +their PostgreSQL deployments can remain available despite various issues that +have the potential to disrupt operations, including hardware failures, network +outages, software errors, or even human mistakes. + +The key portion of high availability that the PostgreSQL Operator provides is +that it delegates the management of HA to the PostgreSQL clusters themselves. +This ensures that the PostgreSQL Operator is not a single-point of failure for +the availability of any of the PostgreSQL clusters that it manages, as the +PostgreSQL Operator is only maintaining the definitions of what should be in the +cluster (e.g. how many instances in the cluster, etc.). + +Each HA PostgreSQL cluster maintains its availability by using Patroni to manage +failover when the primary becomes compromised. Patroni stores the primary’s ID in +annotations on a Kubernetes `Endpoints` object which acts as a lease. The primary +must periodically renew the lease to signal that it’s healthy. 
If the primary +misses its deadline, replicas compare their WAL positions to see who has the most +up-to-date data. Instances with the latest data try to overwrite the ID on the lease. +The first to succeed becomes the new primary, and all others follow the new primary. + +## How The Crunchy PostgreSQL Operator Uses Pod Anti-Affinity + +Kubernetes has two types of Pod anti-affinity: + +- Preferred: With preferred (`preferredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes will make a best effort to schedule Pods matching the anti-affinity rules to different Nodes. However, if it is not possible to do so, then Kubernetes may schedule one or more Pods to the same Node. +- Required: With required (`requiredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes mandates that each Pod matching the anti-affinity rules **must** be scheduled to different Nodes. However, a Pod may not be scheduled if Kubernetes cannot find a Node that does not contain a Pod matching the rules. + +There is a tradeoff with these two types of pod anti-affinity: while "required" anti-affinity will ensure that all the matching Pods are scheduled on different Nodes, if Kubernetes cannot find an available Node, your Postgres instance may not be scheduled. Likewise, while "preferred" anti-affinity will make a best effort to scheduled your Pods on different Nodes, Kubernetes may compromise and schedule more than one Postgres instance of the same cluster on the same Node. + +By understanding these tradeoffs, the makeup of your Kubernetes cluster, and your requirements, you can choose the method that makes the most sense for your Postgres deployment. We'll show examples of both methods below! + +For an example for how pod anti-affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-anti-affinity). 
+ +## Synchronous Replication: Guarding Against Transactions Loss + +Clusters managed by the Crunchy PostgreSQL Operator can be deployed with +synchronous replication, which is useful for workloads that are sensitive to +losing transactions, as PostgreSQL will not consider a transaction to be +committed until it is committed to all synchronous replicas connected to a +primary. This provides a higher guarantee of data consistency and, when a +healthy synchronous replica is present, a guarantee of the most up-to-date data +during a failover event. + +This comes at a cost of performance: PostgreSQL has to wait for +a transaction to be committed on all synchronous replicas, and a connected client +will have to wait longer than if the transaction only had to be committed on the +primary (which is how asynchronous replication works). Additionally, there is a +potential impact to availability: if a synchronous replica crashes, any writes +to the primary will be blocked until a replica is promoted to become a new +synchronous replica of the primary. + +## Node Affinity + +Kubernetes [Node Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity) +can be used to scheduled Pods to specific Nodes within a Kubernetes cluster. +This can be useful when you want your PostgreSQL instances to take advantage of +specific hardware (e.g. for geospatial applications) or if you want to have a +replica instance deployed to a specific region within your Kubernetes cluster +for high availability purposes. + +For an example for how node affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}##node-affinity). + +## Tolerations + +Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) +can help with the scheduling of Pods to appropriate nodes. 
There are many +reasons that a Kubernetes administrator may want to use tolerations, such as +restricting the types of Pods that can be assigned to particular Nodes. +Reasoning and strategy for using taints and tolerations is outside the scope of +this documentation. + +You can configure the tolerations for your Postgres instances on the `postgresclusters` custom resource. + +## Pod Topology Spread Constraints + +Kubernetes [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) +can also help you efficiently schedule your workloads by ensuring your Pods are +not scheduled in only one portion of your Kubernetes cluster. By spreading your +Pods across your Kubernetes cluster among your various failure-domains, such as +regions, zones, nodes, and other user-defined topology domains, you can achieve +high availability as well as efficient resource utilization. + +For an example of how pod topology spread constraints work with PGO, please see +the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-topology-spread-constraints). + +## Rolling Updates + +During the lifecycle of a PostgreSQL cluster, there are certain events that may +require a planned restart, such as an update to a "restart required" PostgreSQL +configuration setting (e.g. [`shared_buffers`](https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS)) +or a change to a Kubernetes Pod template (e.g. [changing the memory request]({{< relref "tutorial/resize-cluster.md">}}#customize-cpu-memory)). +Restarts can be disruptive in a high availability deployment, which is +why many setups employ a ["rolling update" strategy](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) +(aka a "rolling restart") to minimize or eliminate downtime during a planned +restart. 
+ +Because PostgreSQL is a stateful application, a simple rolling restart strategy +will not work: PostgreSQL needs to ensure that there is a primary available that +can accept reads and writes. This requires following a method that will minimize +the amount of downtime when the primary is taken offline for a restart. + +The PostgreSQL Operator uses the following algorithm to perform the rolling restart to minimize any potential interruptions: + +1. Each replica is updated in sequential order. This follows the following +process: + + 1. The replica is explicitly shut down to ensure any outstanding changes are + flushed to disk. + + 2. If requested, the PostgreSQL Operator will apply any changes to the Pod. + + 3. The replica is brought back online. The PostgreSQL Operator waits for the + replica to become available before it proceeds to the next replica. + +2. The above steps are repeated until all of the replicas are restarted. + +3. A controlled switchover is performed. The PostgreSQL Operator determines +which replica is the best candidate to become the new primary. It then demotes +the primary to become a replica and promotes the best candidate to become the +new primary. + +4. The former primary follows a process similar to what is described in step 1. + +The downtime is thus constrained to the amount of time the switchover takes. + +PGO will automatically detect when to apply a rolling update. + +## Pod Disruption Budgets + +Pods in a Kubernetes cluster can experience [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) +as a result of actions initiated by the application owner or a Cluster Administrator. During these +voluntary disruptions Pod Disruption Budgets (PDBs) can be used to ensure that a minimum number of Pods +will be running. 
The operator allows you to define a minimum number of Pods that should be +available for instance sets and PgBouncer deployments in your postgrescluster. This minimum is +configured in the postgrescluster spec and will be used to create PDBs associated to a resource defined +in the spec. For example, the following spec will create two PDBs, one for `instance1` and one for +the PgBouncer deployment: + +``` +spec: + instances: + - name: instance1 + replicas: 3 + minAvailable: 1 + proxy: + pgBouncer: + replicas: 3 + minAvailable: 1 +``` + +{{% notice tip %}} +The `minAvailable` field accepts number (`3`) or string percentage (`50%`) values. For more +information see [Specifying a PodDisruptionBudget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget). +{{% /notice %}} + +If `minAvailable` is set to `0`, we will not reconcile a PDB for the resource and any existing PDBs +will be removed. This will effectively disable Pod Disruption Budgets for the resource. + +If `minAvailable` is not provided for an object, a default value will be defined based on the +number of replicas defined for that object. If there is one replica, a PDB will not be created. If +there is more than one replica defined, a minimum of one Pod will be used. diff --git a/docs/content/architecture/user-management.md b/docs/content/architecture/user-management.md index 03804a361..e8a41c178 100644 --- a/docs/content/architecture/user-management.md +++ b/docs/content/architecture/user-management.md @@ -45,7 +45,7 @@ While the above defaults may work for your application, there are certain cases ## Custom Users and Databases -Users and databases can be customized in the `spec.users` section of the custom resource. 
These can be adding during cluster creation and adjusted over time, but it's important to note the following: +Users and databases can be customized in the [`spec.users`]({{< relref "/references/crd#postgresclusterspecusersindex" >}}) section of the custom resource. These can be adding during cluster creation and adjusted over time, but it's important to note the following: - If `spec.users` is set during cluster creation, PGO will **not** create any default users or databases except for `postgres`. If you want additional databases, you will need to specify them. - For any users added in `spec.users`, PGO will created a Secret of the format `-pguser-`. This will contain the user credentials. diff --git a/docs/content/guides/huge-pages.md b/docs/content/guides/huge-pages.md index b11de0086..7dce29b6d 100644 --- a/docs/content/guides/huge-pages.md +++ b/docs/content/guides/huge-pages.md @@ -18,7 +18,11 @@ When you enable Huge Pages in your Kube cluster, it is important to keep a few t 2. How many pages were preallocated? Are there any other applications or processes that will be using these pages? 3. Which nodes have Huge Pages enabled? Is it possible that more nodes will be added to the cluster? If so, will they also have Huge Pages enabled? -Once Huge Pages are enabled on one or more nodes in your Kubernetes cluster, you can tell Postgres to start using them by adding some configuration to your PostgresCluster spec (Warning: setting/changing this setting will cause your database to restart): +Once Huge Pages are enabled on one or more nodes in your Kubernetes cluster, you can tell Postgres to start using them by adding some configuration to your PostgresCluster spec: + +{{% notice warning %}} +Warning: setting/changing this setting will cause your database to restart. 
+{{% /notice %}} ```yaml apiVersion: postgres-operator.crunchydata.com/v1beta1 @@ -26,8 +30,8 @@ kind: PostgresCluster metadata: name: hippo spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.6-2 - postgresVersion: 14 + image: {{< param imageCrunchyPostgres >}} + postgresVersion: {{< param postgresVersion >}} instances: - name: instance1 resources: @@ -40,7 +44,11 @@ This is where it is important to know the size and the number of Huge Pages avai Note: In the `instances.#.resources` spec, there are `limits` and `requests`. If a request value is not specified (like in the example above), it is presumed to be equal to the limit value. For Huge Pages, the request value must always be equal to the limit value, therefore, it is perfectly acceptable to just specify it in the `limits` section. -Note: Postgres uses the system default size by default. This means that if there are multiple sizes of Huge Pages available on the node(s) and you attempt to use a size in your PostgresCluster that is not the system default, it will fail. To use a non-default size you will need to tell Postgres the size to use with the `huge_page_size` variable, which can be set via dynamic configuration (Warning: setting/changing this parameter will cause your database to restart): +Note: Postgres uses the system default size by default. This means that if there are multiple sizes of Huge Pages available on the node(s) and you attempt to use a size in your PostgresCluster that is not the system default, it will fail. To use a non-default size you will need to tell Postgres the size to use with the `huge_page_size` variable, which can be set via dynamic configuration: + +{{% notice warning %}} +Warning: setting/changing this parameter will cause your database to restart. +{{% /notice %}} ```yaml patroni: @@ -60,7 +68,11 @@ The only dilemma that remains is that those whose PostgresClusters are not using 1. Use Huge Pages! 
You're already running your Postgres containers on nodes that have Huge Pages enabled, why not use them in Postgres? 2. Create nodes in your Kubernetes cluster that don't have Huge Pages enabled, and put your Postgres containers on those nodes. -3. If for some reason you cannot use Huge Pages in Postgres, but you must run your Postgres containers on nodes that have Huge Pages enabled, you can manually set the `shared_buffers` parameter back to a good setting using dynamic configuration (Warning: setting/changing this parameter will cause your database to restart): +3. If for some reason you cannot use Huge Pages in Postgres, but you must run your Postgres containers on nodes that have Huge Pages enabled, you can manually set the `shared_buffers` parameter back to a good setting using dynamic configuration: + +{{% notice warning %}} +Warning: setting/changing this parameter will cause your database to restart. +{{% /notice %}} ```yaml patroni: diff --git a/docs/content/guides/major-postgres-version-upgrade.md b/docs/content/guides/major-postgres-version-upgrade.md index 7f5bec722..da63a1a31 100644 --- a/docs/content/guides/major-postgres-version-upgrade.md +++ b/docs/content/guides/major-postgres-version-upgrade.md @@ -147,11 +147,13 @@ If you are unable to exec into the Pod, you can run `ANALYZE` directly on each o `pg_upgrade` may also create a script called `delete_old_cluster.sh`, which contains the equivalent of ``` -rm -rf '/pgdata/{{< param fromPostgresVersion >}}' +rm -rf '/pgdata/pg{{< param fromPostgresVersion >}}' ``` When you are satisfied with the upgrade, you can execute this command to remove the old data directory. Do so at your discretion. +Note that the `delete_old_cluster.sh` script does not delete the old WAL files. These are typically found in `/pgdata/pg{{< param fromPostgresVersion >}}_wal`, although they can be stored elsewhere. If you would like to delete these files, this must be done manually. 
+ If you have extensions installed you may need to upgrade those as well. For example, for the `pgaudit` extension we recommend running the following to upgrade: ```sql diff --git a/docs/content/references/_index.md b/docs/content/references/_index.md index f5b4f37f0..2b2a304d9 100644 --- a/docs/content/references/_index.md +++ b/docs/content/references/_index.md @@ -2,5 +2,5 @@ title: "References" date: draft: false -weight: 100 +weight: 103 --- diff --git a/docs/content/references/components.md b/docs/content/references/components.md index 31760bf79..a8ca095ed 100644 --- a/docs/content/references/components.md +++ b/docs/content/references/components.md @@ -95,9 +95,10 @@ The table also lists the initial PGO version that the version of the extension i | Extension | Version | Postgres Versions | Initial PGO Version | |-----------|---------|-------------------|---------------------| | `orafce` | 3.25.1 | 15, 14, 13, 12, 11 | 5.3.0 | -| `orafce` | 3.22.0 | 14, 13, 12, 11, 10 | 5.2.0 | -| `orafce` | 3.22.0 | 14, 13, 12, 11, 10 | 5.1.3 | +| `orafce` | 3.25.1 | 14, 13, 12, 11, 10 | 5.2.1 | +| `orafce` | 3.24.0 | 14, 13, 12, 11, 10 | 5.1.3 | | `orafce` | 3.22.0 | 14, 13, 12, 11, 10 | 5.0.8 | +| `pgAudit` | 1.7.0 | 15 | 5.3.0 | | `pgAudit` | 1.6.2 | 14 | 5.1.0 | | `pgAudit` | 1.6.2 | 14 | 5.0.6 | | `pgAudit` | 1.6.1 | 14 | 5.0.4 | @@ -115,6 +116,9 @@ The table also lists the initial PGO version that the version of the extension i | `pgAudit` | 1.2.2 | 10 | 5.0.0 | | `pgAudit Analyze` | 1.0.8 | 14, 13, 12, 11, 10 | 5.0.3 | | `pgAudit Analyze` | 1.0.7 | 13, 12, 11, 10 | 5.0.0 | +| `pg_cron` | 1.4.2 | 15, 14, 13 | 5.3.0 | +| `pg_cron` | 1.4.2 | 14, 13 | 5.2.1 | +| `pg_cron` | 1.4.1 | 14, 13, 12, 11, 10 | 5.0.5 | | `pg_cron` | 1.3.1 | 14, 13, 12, 11, 10 | 5.0.0 | | `pg_partman` | 4.7.1 | 15, 14, 13, 12, 11 | 5.3.0 | | `pg_partman` | 4.6.2 | 14, 13, 12, 11, 10 | 5.2.0 | @@ -157,6 +161,7 @@ The following extensions are available in the geospatially aware containers (`cr | 
`PostGIS` | 2.5 | 12, 11 | 5.0.0 | | `PostGIS` | 2.4 | 11, 10 | 5.0.0 | | `PostGIS` | 2.3 | 10 | 5.0.0 | +| `pgrouting` | 3.1.4 | 14 | 5.0.4 | | `pgrouting` | 3.1.3 | 13 | 5.0.0 | | `pgrouting` | 3.0.5 | 13, 12 | 5.0.0 | | `pgrouting` | 2.6.3 | 12, 11, 10 | 5.0.0 | diff --git a/docs/content/references/crd.md b/docs/content/references/crd.md index a70ca77f1..43195dc7c 100644 --- a/docs/content/references/crd.md +++ b/docs/content/references/crd.md @@ -6,8 +6,18949 @@ weight: 100 Packages: +- [pgv2.percona.com/v2](#pgv2perconacomv2) - [postgres-operator.crunchydata.com/v1beta1](#postgres-operatorcrunchydatacomv1beta1) +

pgv2.percona.com/v2

+ +Resource Types: + +- [PerconaPGBackup](#perconapgbackup) + +- [PerconaPGCluster](#perconapgcluster) + +- [PerconaPGRestore](#perconapgrestore) + + + + +

PerconaPGBackup

+ + + + + + +PerconaPGBackup is the CRD that defines a Percona PostgreSQL Backup + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringpgv2.percona.com/v2true
kindstringPerconaPGBackuptrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobjecttrue
statusobjectfalse
+ + +

+ PerconaPGBackup.spec + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgClusterstringtrue
repoNamestringThe name of the pgBackRest repo to run the backup command against.true
options[]stringCommand line options to include when running the pgBackRest backup command. https://pgbackrest.org/command.html#command-backupfalse
+ + +

+ PerconaPGBackup.status + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
completedstringfalse
jobNamestringfalse
statestringfalse
+ +

PerconaPGCluster

+ + + + + + +PerconaPGCluster is the CRD that defines a Percona PG Cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringpgv2.percona.com/v2true
kindstringPerconaPGClustertrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobjecttrue
statusobjectfalse
+ + +

+ PerconaPGCluster.spec + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
backupsobjectPostgreSQL backup configurationtrue
instances[]objectSpecifies one or more sets of PostgreSQL pods that replicate data for this cluster.true
postgresVersionintegerThe major version of PostgreSQL installed in the PostgreSQL imagetrue
crVersionstringVersion of the operator. Update this to new version after operator upgrade to apply changes to Kubernetes objects. Default is the latest version.false
dataSourceobjectSpecifies a data source for bootstrapping the PostgreSQL cluster.false
databaseInitSQLobjectDatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster.false
exposeobjectSpecification of the service that exposes the PostgreSQL primary instance.false
imagestringThe image name to use for PostgreSQL containers.false
imagePullPolicyenumImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policyfalse
imagePullSecrets[]objectThe image pull secrets used to pull from a private registry Changing this value causes all running pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/false
openshiftbooleanWhether or not the PostgreSQL cluster is being deployed to an OpenShift environment. If the field is unset, the operator will automatically detect the environment.false
patroniobjectfalse
pausebooleanWhether or not the PostgreSQL cluster should be stopped. When this is true, workloads are scaled to zero and CronJobs are suspended. Other resources, such as Services and Volumes, remain in place.false
pmmobjectThe specification of PMM sidecars.false
portintegerThe port on which PostgreSQL should listen.false
proxyobjectThe specification of a proxy that connects to PostgreSQL.false
secretsobjectfalse
standbyobjectRun this cluster as a read-only copy of an existing cluster or archive.false
unmanagedbooleanSuspends the rollout and reconciliation of changes made to the PostgresCluster spec.false
users[]objectUsers to create inside PostgreSQL and the databases they should access. The default creates one user that can access one database matching the PostgresCluster name. An empty list creates no users. Removing a user from this list does NOT drop the user nor revoke their access.false
+ + +

+ PerconaPGCluster.spec.backups + ↩ Parent +

+ + + +PostgreSQL backup configuration + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgbackrestobjectpgBackRest archive configurationtrue
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest + ↩ Parent +

+ + + +pgBackRest archive configuration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
repos[]objectDefines a pgBackRest repositorytrue
configuration[]objectProjected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.htmlfalse
globalmap[string]stringGlobal pgBackRest configuration settings. These settings are included in the "global" section of the pgBackRest configuration generated by the PostgreSQL Operator, and then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.htmlfalse
imagestringThe image name to use for pgBackRest containers. Utilized to run pgBackRest repository hosts and backups. The image may also be set using the RELATED_IMAGE_PGBACKREST environment variablefalse
jobsobjectJobs field allows configuration for all backup jobsfalse
manualobjectDefines details for manual pgBackRest backup Jobsfalse
metadataobjectMetadata contains metadata for custom resourcesfalse
repoHostobjectDefines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment.false
restoreobjectDefines details for performing an in-place restore using pgBackRestfalse
sidecarsobjectConfiguration for pgBackRest sidecar containersfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index] + ↩ Parent +

+ + + +PGBackRestRepo represents a pgBackRest repository. Only one of its members may be specified. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe name of the repositorytrue
azureobjectRepresents a pgBackRest repository that is created using Azure storagefalse
gcsobjectRepresents a pgBackRest repository that is created using Google Cloud Storagefalse
s3objectRepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storagefalse
schedulesobjectDefines the schedules for the pgBackRest backups Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backupfalse
volumeobjectRepresents a pgBackRest repository that is created using a PersistentVolumeClaimfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].azure + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using Azure storage + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerstringThe Azure container utilized for the repositorytrue
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].gcs + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using Google Cloud Storage + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
bucketstringThe GCS bucket utilized for the repositorytrue
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].s3 + ↩ Parent +

+ + + +RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
bucketstringThe S3 bucket utilized for the repositorytrue
endpointstringA valid endpoint corresponding to the specified regiontrue
regionstringThe region corresponding to the S3 buckettrue
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].schedules + ↩ Parent +

+ + + +Defines the schedules for the pgBackRest backups Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
differentialstringDefines the Cron schedule for a differential pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
fullstringDefines the Cron schedule for a full pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
incrementalstringDefines the Cron schedule for an incremental pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using a PersistentVolumeClaim + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
volumeClaimSpecobjectDefines a PersistentVolumeClaim spec used to create and/or bind a volumetrue
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec + ↩ Parent +

+ + + +Defines a PersistentVolumeClaim spec used to create and/or bind a volume + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
accessModes[]stringaccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectdataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.false
dataSourceRefobjectdataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
resourcesobjectresources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectselector is a label query over volumes to consider for binding.false
storageClassNamestringstorageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringvolumeName is the binding reference to the PersistentVolume backing this claim.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSource + ↩ Parent +

+ + + +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSourceRef + ↩ Parent +

+ + + +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.resources + ↩ Parent +

+ + + +resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector + ↩ Parent +

+ + + +selector is a label query over volumes to consider for binding. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index] + ↩ Parent +

+ + + +Projection that may be projected along with other supported volume types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapobjectconfigMap information about the configMap data to projectfalse
downwardAPIobjectdownwardAPI information about the downwardAPI data to projectfalse
secretobjectsecret information about the secret data to projectfalse
serviceAccountTokenobjectserviceAccountToken is information about the serviceAccountToken data to projectfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].configMap + ↩ Parent +

+ + + +configMap information about the configMap data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional specify whether the ConfigMap or its keys must be definedfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].configMap.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].downwardAPI + ↩ Parent +

+ + + +downwardAPI information about the downwardAPI data to project + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectItems is a list of DownwardAPIVolume filesfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index] + ↩ Parent +

+ + + +DownwardAPIVolumeFile represents information to create the file containing the pod field + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'true
fieldRefobjectRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.false
modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].fieldRef + ↩ Parent +

+ + + +Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].resourceFieldRef + ↩ Parent +

+ + + +Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].secret + ↩ Parent +

+ + + +secret information about the secret data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].secret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.configuration[index].serviceAccountToken + ↩ Parent +

+ + + +serviceAccountToken is information about the serviceAccountToken data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringpath is the path relative to the mount point of the file to project the token into.true
audiencestringaudience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.false
expirationSecondsintegerexpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs + ↩ Parent +

+ + + +Jobs field allows configuration for all backup jobs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
affinityobjectScheduling constraints of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
priorityClassNamestringPriority class name for the pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
resourcesobjectResource limits for backup jobs. Includes manual, scheduled and replica create backupsfalse
tolerations[]objectTolerations of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
ttlSecondsAfterFinishedintegerLimit the lifetime of a Job that has finished. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity + ↩ Parent +

+ + + +Scheduling constraints of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.resources + ↩ Parent +

+ + + +Resource limits for backup jobs. Includes manual, scheduled and replica create backups + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.jobs.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple (key, value, effect) using the matching operator. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.manual + ↩ Parent +

+ + + +Defines details for manual pgBackRest backup Jobs + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
repoNamestringThe name of the pgBackRest repo to run the backup command against.true
options[]stringCommand line options to include when running the pgBackRest backup command. https://pgbackrest.org/command.html#command-backupfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.metadata + ↩ Parent +

+ + + +Metadata contains metadata for custom resources + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost + ↩ Parent +

+ + + +Defines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
affinityobjectScheduling constraints of the Dedicated repo host pod. Changing this value causes repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
priorityClassNamestringPriority class name for the pgBackRest repo host pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
resourcesobjectResource requirements for a pgBackRest repository hostfalse
sshConfigMapobjectConfigMap containing custom SSH configuration. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization.false
sshSecretobjectSecret containing custom SSH keys. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization.false
tolerations[]objectTolerations of a PgBackRest repo host pod. Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
topologySpreadConstraints[]objectTopology spread constraints of a Dedicated repo host pod. Changing this value causes the repo host to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity + ↩ Parent +

+ + + +Scheduling constraints of the Dedicated repo host pod. Changing this value causes repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.resources + ↩ Parent +

+ + + +Resource requirements for a pgBackRest repository host + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.sshConfigMap + ↩ Parent +

+ + + +ConfigMap containing custom SSH configuration. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional specify whether the ConfigMap or its keys must be definedfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.sshConfigMap.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.sshSecret + ↩ Parent +

+ + + +Secret containing custom SSH keys. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.sshSecret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple (key, value, effect) using the matching operator (operator). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index] + ↩ Parent +

+ + + +TopologySpreadConstraint specifies how to spread matching pods among the given topology. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
maxSkewintegerMaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.true
topologyKeystringTopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.true
whenUnsatisfiablestringWhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.true
labelSelectorobjectLabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.false
matchLabelKeys[]stringMatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.false
minDomainsintegerMinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).false
nodeAffinityPolicystringNodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
nodeTaintsPolicystringNodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index].labelSelector + ↩ Parent +

+ + + +LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore + ↩ Parent +

+ + + +Defines details for performing an in-place restore using pgBackRest + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
enabledbooleanWhether or not in-place pgBackRest restores are enabled for this PostgresCluster.true
repoNamestringThe name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster.true
affinityobjectScheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
clusterNamestringThe name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided.false
clusterNamespacestringThe namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided.false
options[]stringCommand line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restorefalse
priorityClassNamestringPriority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
resourcesobjectResource requirements for the pgBackRest restore Job.false
tolerations[]objectTolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity + ↩ Parent +

+ + + +Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.resources + ↩ Parent +

+ + + +Resource requirements for the pgBackRest restore Job. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.restore.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars + ↩ Parent +

+ + + +Configuration for pgBackRest sidecar containers + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgbackrestobjectDefines the configuration for the pgBackRest sidecar containerfalse
pgbackrestConfigobjectDefines the configuration for the pgBackRest config sidecar containerfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrest + ↩ Parent +

+ + + +Defines the configuration for the pgBackRest sidecar container + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcesobjectResource requirements for a sidecar containerfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources + ↩ Parent +

+ + + +Resource requirements for a sidecar container + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig + ↩ Parent +

+ + + +Defines the configuration for the pgBackRest config sidecar container + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcesobjectResource requirements for a sidecar containerfalse
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig.resources + ↩ Parent +

+ + + +Resource requirements for a sidecar container + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.instances[index] + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
dataVolumeClaimSpecobjectDefines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumestrue
affinityobjectScheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
metadataobjectMetadata contains metadata for custom resourcesfalse
minAvailableint or stringMinimum number of pods that should be available at a time. Defaults to one when the replicas field is greater than one.false
namestringName that associates this set of PostgreSQL pods. This field is optional when only one instance set is defined. Each instance set in a cluster must have a unique name. The combined length of this and the cluster name must be 46 characters or less.false
priorityClassNamestringPriority class name for the PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
replicasintegerNumber of desired PostgreSQL pods.false
resourcesobjectCompute resources of a PostgreSQL container.false
sidecars[]objectCustom sidecars for PostgreSQL instance pods. Changing this value causes PostgreSQL to restart.false
tolerations[]objectTolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
topologySpreadConstraints[]objectTopology spread constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/false
walVolumeClaimSpecobjectDefines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.htmlfalse
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec + ↩ Parent +

+ + + +Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
accessModes[]stringaccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectdataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.false
dataSourceRefobjectdataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
resourcesobjectresources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectselector is a label query over volumes to consider for binding.false
storageClassNamestringstorageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringvolumeName is the binding reference to the PersistentVolume backing this claim.false
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.dataSource + ↩ Parent +

+ + + +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.dataSourceRef + ↩ Parent +

+ + + +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.resources + ↩ Parent +

+ + + +resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.selector + ↩ Parent +

+ + + +selector is a label query over volumes to consider for binding. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].dataVolumeClaimSpec.selector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity + ↩ Parent +

+ + + +Scheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].metadata + ↩ Parent +

+ + + +Metadata contains metadata for custom resources + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
+ + +

+ PerconaPGCluster.spec.instances[index].resources + ↩ Parent +

+ + + +Compute resources of a PostgreSQL container. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.instances[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index] + ↩ Parent +

+ + + +A single application container that you want to run within a pod. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.true
args[]stringArguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shellfalse
command[]stringEntrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shellfalse
env[]objectList of environment variables to set in the container. Cannot be updated.false
envFrom[]objectList of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.false
imagestringContainer image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.false
imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-imagesfalse
lifecycleobjectActions that the management system should take in response to container lifecycle events. Cannot be updated.false
livenessProbeobjectPeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
ports[]objectList of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.false
readinessProbeobjectPeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
resourcesobjectCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
securityContextobjectSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/false
startupProbeobjectStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.false
stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is falsefalse
terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.false
terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.false
ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.false
volumeDevices[]objectvolumeDevices is the list of block devices to be used by the container.false
volumeMounts[]objectPod volumes to mount into the container's filesystem. Cannot be updated.false
workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index] + ↩ Parent +

+ + + +EnvVar represents an environment variable present in a Container. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the environment variable. Must be a C_IDENTIFIER.true
valuestringVariable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".false
valueFromobjectSource for the environment variable's value. Cannot be used if value is not empty.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index].valueFrom + ↩ Parent +

+ + + +Source for the environment variable's value. Cannot be used if value is not empty. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapKeyRefobjectSelects a key of a ConfigMap.false
fieldRefobjectSelects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.false
secretKeyRefobjectSelects a key of a secret in the pod's namespacefalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index].valueFrom.configMapKeyRef + ↩ Parent +

+ + + +Selects a key of a ConfigMap. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe key to select.true
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the ConfigMap or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index].valueFrom.fieldRef + ↩ Parent +

+ + + +Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index].valueFrom.resourceFieldRef + ↩ Parent +

+ + + +Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].env[index].valueFrom.secretKeyRef + ↩ Parent +

+ + + +Selects a key of a secret in the pod's namespace + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe key of the secret to select from. Must be a valid secret key.true
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].envFrom[index] + ↩ Parent +

+ + + +EnvFromSource represents the source of a set of ConfigMaps + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapRefobjectThe ConfigMap to select fromfalse
prefixstringAn optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.false
secretRefobjectThe Secret to select fromfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].envFrom[index].configMapRef + ↩ Parent +

+ + + +The ConfigMap to select from + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the ConfigMap must be definedfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].envFrom[index].secretRef + ↩ Parent +

+ + + +The Secret to select from + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the Secret must be definedfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle + ↩ Parent +

+ + + +Actions that the management system should take in response to container lifecycle events. Cannot be updated. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
postStartobjectPostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooksfalse
preStopobjectPreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooksfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.postStart + ↩ Parent +

+ + + +PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
httpGetobjectHTTPGet specifies the http request to perform.false
tcpSocketobjectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when tcp handler is specified.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.postStart.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.postStart.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.postStart.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.postStart.tcpSocket + ↩ Parent +

+ + + +Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.preStop + ↩ Parent +

+ + + +PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
httpGetobjectHTTPGet specifies the http request to perform.false
tcpSocketobjectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when tcp handler is specified.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.preStop.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.preStop.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.preStop.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].lifecycle.preStop.tcpSocket + ↩ Parent +

+ + + +Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe + ↩ Parent +

+ + + +Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].livenessProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].ports[index] + ↩ Parent +

+ + + +ContainerPort represents a network port in a single container. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerPortintegerNumber of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.true
hostIPstringWhat host IP to bind the external port to.false
hostPortintegerNumber of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.false
namestringIf specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.false
protocolstringProtocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe + ↩ Parent +

+ + + +Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].readinessProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].resources + ↩ Parent +

+ + + +Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].securityContext + ↩ Parent +

+ + + +SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
allowPrivilegeEscalationbooleanAllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.false
capabilitiesobjectThe capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.false
privilegedbooleanRun container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.false
procMountstringprocMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.false
readOnlyRootFilesystembooleanWhether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.false
runAsGroupintegerThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
runAsNonRootbooleanIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
runAsUserintegerThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seLinuxOptionsobjectThe SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seccompProfileobjectThe seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.false
windowsOptionsobjectThe Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].securityContext.capabilities + ↩ Parent +

+ + + +The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
add[]stringAdded capabilitiesfalse
drop[]stringRemoved capabilitiesfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].securityContext.seLinuxOptions + ↩ Parent +

+ + + +The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
levelstringLevel is SELinux level label that applies to the container.false
rolestringRole is a SELinux role label that applies to the container.false
typestringType is a SELinux type label that applies to the container.false
userstringUser is a SELinux user label that applies to the container.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].securityContext.seccompProfile + ↩ Parent +

+ + + +The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestringtype indicates which kind of seccomp profile will be applied. Valid options are: + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.true
localhostProfilestringlocalhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].securityContext.windowsOptions + ↩ Parent +

+ + + +The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
gmsaCredentialSpecstringGMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.false
gmsaCredentialSpecNamestringGMSACredentialSpecName is the name of the GMSA credential spec to use.false
hostProcessbooleanHostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.false
runAsUserNamestringThe UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe + ↩ Parent +

+ + + +StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].startupProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].volumeDevices[index] + ↩ Parent +

+ + + +volumeDevice describes a mapping of a raw block device within a container. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
devicePathstringdevicePath is the path inside of the container that the device will be mapped to.true
namestringname must match the name of a persistentVolumeClaim in the podtrue
+ + +

+ PerconaPGCluster.spec.instances[index].sidecars[index].volumeMounts[index] + ↩ Parent +

+ + + +VolumeMount describes a mounting of a Volume within a container. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
mountPathstringPath within the container at which the volume should be mounted. Must not contain ':'.true
namestringThis must match the Name of a Volume.true
mountPropagationstringmountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.false
readOnlybooleanMounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.false
subPathstringPath within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).false
subPathExprstringExpanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.false
+ + +

+ PerconaPGCluster.spec.instances[index].tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.instances[index].topologySpreadConstraints[index] + ↩ Parent +

+ + + +TopologySpreadConstraint specifies how to spread matching pods among the given topology. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
maxSkewintegerMaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.true
topologyKeystringTopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.true
whenUnsatisfiablestringWhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.true
labelSelectorobjectLabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.false
matchLabelKeys[]stringMatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.false
minDomainsintegerMinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).false
nodeAffinityPolicystringNodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
nodeTaintsPolicystringNodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
+ + +

+ PerconaPGCluster.spec.instances[index].topologySpreadConstraints[index].labelSelector + ↩ Parent +

+ + + +LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].topologySpreadConstraints[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec + ↩ Parent +

+ + + +Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
accessModes[]stringaccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectdataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.false
dataSourceRefobjectdataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
resourcesobjectresources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectselector is a label query over volumes to consider for binding.false
storageClassNamestringstorageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringvolumeName is the binding reference to the PersistentVolume backing this claim.false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.dataSource + ↩ Parent +

+ + + +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.dataSourceRef + ↩ Parent +

+ + + +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.resources + ↩ Parent +

+ + + +resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.selector + ↩ Parent +

+ + + +selector is a label query over volumes to consider for binding. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.instances[index].walVolumeClaimSpec.selector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource + ↩ Parent +

+ + + +Specifies a data source for bootstrapping the PostgreSQL cluster. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgbackrestobjectDefines a pgBackRest cloud-based data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL clusterfalse
postgresClusterobjectDefines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL clusterfalse
volumesobjectDefines any existing volumes to reuse for this PostgresCluster.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest + ↩ Parent +

+ + + +Defines a pgBackRest cloud-based data source that can be used to pre-populate the the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
repoobjectDefines a pgBackRest repositorytrue
stanzastringThe name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. Defaults to `db` if not provided.true
affinityobjectScheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
configuration[]objectProjected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.htmlfalse
globalmap[string]stringGlobal pgBackRest configuration settings. These settings are included in the "global" section of the pgBackRest configuration generated by the PostgreSQL Operator, and then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.htmlfalse
options[]stringCommand line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restorefalse
priorityClassNamestringPriority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
resourcesobjectResource requirements for the pgBackRest restore Job.false
tolerations[]objectTolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo + ↩ Parent +

+ + + +Defines a pgBackRest repository + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe name of the repositorytrue
azureobjectRepresents a pgBackRest repository that is created using Azure storagefalse
gcsobjectRepresents a pgBackRest repository that is created using Google Cloud Storagefalse
s3objectRepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storagefalse
schedulesobjectDefines the schedules for the pgBackRest backups Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backupfalse
volumeobjectRepresents a pgBackRest repository that is created using a PersistentVolumeClaimfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.azure + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using Azure storage + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerstringThe Azure container utilized for the repositorytrue
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.gcs + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using Google Cloud Storage + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
bucketstringThe GCS bucket utilized for the repositorytrue
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.s3 + ↩ Parent +

+ + + +RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
bucketstringThe S3 bucket utilized for the repositorytrue
endpointstringA valid endpoint corresponding to the specified regiontrue
regionstringThe region corresponding to the S3 buckettrue
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.schedules + ↩ Parent +

+ + + +Defines the schedules for the pgBackRest backups Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
differentialstringDefines the Cron schedule for a differential pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
fullstringDefines the Cron schedule for a full pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
incrementalstringDefines the Cron schedule for an incremental pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntaxfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume + ↩ Parent +

+ + + +Represents a pgBackRest repository that is created using a PersistentVolumeClaim + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
volumeClaimSpecobjectDefines a PersistentVolumeClaim spec used to create and/or bind a volumetrue
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec + ↩ Parent +

+ + + +Defines a PersistentVolumeClaim spec used to create and/or bind a volume + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
accessModes[]stringaccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectdataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.false
dataSourceRefobjectdataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
resourcesobjectresources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectselector is a label query over volumes to consider for binding.false
storageClassNamestringstorageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringvolumeName is the binding reference to the PersistentVolume backing this claim.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.dataSource + ↩ Parent +

+ + + +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.dataSourceRef + ↩ Parent +

+ + + +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources + ↩ Parent +

+ + + +resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.selector + ↩ Parent +

+ + + +selector is a label query over volumes to consider for binding. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.selector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity + ↩ Parent +

+ + + +Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index] + ↩ Parent +

+ + + +Projection that may be projected along with other supported volume types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapobjectconfigMap information about the configMap data to projectfalse
downwardAPIobjectdownwardAPI information about the downwardAPI data to projectfalse
secretobjectsecret information about the secret data to projectfalse
serviceAccountTokenobjectserviceAccountToken is information about the serviceAccountToken data to projectfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].configMap + ↩ Parent +

+ + + +configMap information about the configMap data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional specify whether the ConfigMap or its keys must be definedfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].configMap.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI + ↩ Parent +

+ + + +downwardAPI information about the downwardAPI data to project + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectItems is a list of DownwardAPIVolume filefalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index] + ↩ Parent +

+ + + +DownwardAPIVolumeFile represents information to create the file containing the pod field + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'true
fieldRefobjectRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.false
modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index].fieldRef + ↩ Parent +

+ + + +Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index].resourceFieldRef + ↩ Parent +

+ + + +Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].secret + ↩ Parent +

+ + + +secret information about the secret data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].secret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.configuration[index].serviceAccountToken + ↩ Parent +

+ + + +serviceAccountToken is information about the serviceAccountToken data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringpath is the path relative to the mount point of the file to project the token into.true
audiencestringaudience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.false
expirationSecondsintegerexpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.resources + ↩ Parent +

+ + + +Resource requirements for the pgBackRest restore Job. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.dataSource.pgbackrest.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster + ↩ Parent +

+ + + +Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
repoNamestringThe name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster.true
affinityobjectScheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
clusterNamestringThe name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided.false
clusterNamespacestringThe namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided.false
options[]stringCommand line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restorefalse
priorityClassNamestringPriority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
resourcesobjectResource requirements for the pgBackRest restore Job.false
tolerations[]objectTolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity + ↩ Parent +

+ + + +Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.resources + ↩ Parent +

+ + + +Resource requirements for the pgBackRest restore Job. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.dataSource.postgresCluster.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple &lt;key,value,effect&gt; using the matching operator &lt;operator&gt;. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.dataSource.volumes + ↩ Parent +

+ + + +Defines any existing volumes to reuse for this PostgresCluster. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgBackRestVolumeobjectDefines the existing pgBackRest repo volume and directory to use in the current PostgresCluster.false
pgDataVolumeobjectDefines the existing pgData volume and directory to use in the current PostgresCluster.false
pgWALVolumeobjectDefines the existing pg_wal volume and directory to use in the current PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by a pgData volume.false
+ + +

+ PerconaPGCluster.spec.dataSource.volumes.pgBackRestVolume + ↩ Parent +

+ + + +Defines the existing pgBackRest repo volume and directory to use in the current PostgresCluster. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pvcNamestringThe existing PVC name.true
directorystringThe existing directory. When not set, a move Job is not created for the associated volume.false
+ + +

+ PerconaPGCluster.spec.dataSource.volumes.pgDataVolume + ↩ Parent +

+ + + +Defines the existing pgData volume and directory to use in the current PostgresCluster. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pvcNamestringThe existing PVC name.true
directorystringThe existing directory. When not set, a move Job is not created for the associated volume.false
+ + +

+ PerconaPGCluster.spec.dataSource.volumes.pgWALVolume + ↩ Parent +

+ + + +Defines the existing pg_wal volume and directory to use in the current PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by a pgData volume. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pvcNamestringThe existing PVC name.true
directorystringThe existing directory. When not set, a move Job is not created for the associated volume.false
+ + +

+ PerconaPGCluster.spec.databaseInitSQL + ↩ Parent +

+ + + +DatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringKey is the ConfigMap data key that points to a SQL stringtrue
namestringName is the name of a ConfigMaptrue
+ + +

+ PerconaPGCluster.spec.expose + ↩ Parent +

+ + + +Specification of the service that exposes the PostgreSQL primary instance. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
nodePortintegerThe port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeportfalse
typeenumMore info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-typesfalse
+ + +

+ PerconaPGCluster.spec.imagePullSecrets[index] + ↩ Parent +

+ + + +LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
+ + +

+ PerconaPGCluster.spec.patroni + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
dynamicConfigurationobjectPatroni dynamic configuration settings. Changes to this value will be automatically reloaded without validation. Changes to certain PostgreSQL parameters cause PostgreSQL to restart. More info: https://patroni.readthedocs.io/en/latest/SETTINGS.htmlfalse
leaderLeaseDurationSecondsintegerTTL of the cluster leader lock. "Think of it as the length of time before initiation of the automatic failover process." Changing this value causes PostgreSQL to restart.false
portintegerThe port on which Patroni should listen. Changing this value causes PostgreSQL to restart.false
switchoverobjectSwitchover gives options to perform ad hoc switchovers in a PostgresCluster.false
syncPeriodSecondsintegerThe interval for refreshing the leader lock and applying dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. Changing this value causes PostgreSQL to restart.false
+ + +

+ PerconaPGCluster.spec.patroni.switchover + ↩ Parent +

+ + + +Switchover gives options to perform ad hoc switchovers in a PostgresCluster. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
enabledbooleanWhether or not the operator should allow switchovers in a PostgresClustertrue
targetInstancestringThe instance that should become primary during a switchover. This field is optional when Type is "Switchover" and required when Type is "Failover". When it is not specified, a healthy replica is automatically selected.false
typeenumType of switchover to perform. Valid options are Switchover and Failover. "Switchover" changes the primary instance of a healthy PostgresCluster. "Failover" forces a particular instance to be primary, regardless of other factors. A TargetInstance must be specified to failover. NOTE: The Failover type is reserved as the "last resort" case.false
+ + +

+ PerconaPGCluster.spec.pmm + ↩ Parent +

+ + + +The specification of PMM sidecars. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
enabledbooleantrue
imagestringtrue
containerSecurityContextobjectSecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.false
imagePullPolicyenumImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policyfalse
resourcesobjectCompute resources of a PMM container.false
runtimeClassNamestringfalse
secretstringfalse
serverHoststringfalse
+ + +

+ PerconaPGCluster.spec.pmm.containerSecurityContext + ↩ Parent +

+ + + +SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
allowPrivilegeEscalationbooleanAllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.false
capabilitiesobjectThe capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.false
privilegedbooleanRun container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.false
procMountstringprocMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.false
readOnlyRootFilesystembooleanWhether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.false
runAsGroupintegerThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
runAsNonRootbooleanIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
runAsUserintegerThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seLinuxOptionsobjectThe SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seccompProfileobjectThe seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.false
windowsOptionsobjectThe Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.false
+ + +

+ PerconaPGCluster.spec.pmm.containerSecurityContext.capabilities + ↩ Parent +

+ + + +The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
add[]stringAdded capabilitiesfalse
drop[]stringRemoved capabilitiesfalse
+ + +

+ PerconaPGCluster.spec.pmm.containerSecurityContext.seLinuxOptions + ↩ Parent +

+ + + +The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
levelstringLevel is SELinux level label that applies to the container.false
rolestringRole is a SELinux role label that applies to the container.false
typestringType is a SELinux type label that applies to the container.false
userstringUser is a SELinux user label that applies to the container.false
+ + +

+ PerconaPGCluster.spec.pmm.containerSecurityContext.seccompProfile + ↩ Parent +

+ + + +The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestringtype indicates which kind of seccomp profile will be applied. Valid options are: + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.true
localhostProfilestringlocalhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".false
+ + +

+ PerconaPGCluster.spec.pmm.containerSecurityContext.windowsOptions + ↩ Parent +

+ + + +The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
gmsaCredentialSpecstringGMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.false
gmsaCredentialSpecNamestringGMSACredentialSpecName is the name of the GMSA credential spec to use.false
hostProcessbooleanHostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.false
runAsUserNamestringThe UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
+ + +

+ PerconaPGCluster.spec.pmm.resources + ↩ Parent +

+ + + +Compute resources of a PMM container. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.pmm.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.proxy + ↩ Parent +

+ + + +The specification of a proxy that connects to PostgreSQL. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgBouncerobjectDefines a PgBouncer proxy and connection pooler.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer + ↩ Parent +

+ + + +Defines a PgBouncer proxy and connection pooler. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
affinityobjectScheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-nodefalse
configobjectConfiguration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reloadfalse
customTLSSecretobjectA secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-pathsfalse
exposeobjectSpecification of the service that exposes PgBouncer.false
exposeSuperusersbooleanAllow SUPERUSERs to connect through PGBouncer.false
imagestringName of a container image that can run PgBouncer 1.15 or newer. Changing this value causes PgBouncer to restart. The image may also be set using the RELATED_IMAGE_PGBOUNCER environment variable. More info: https://kubernetes.io/docs/concepts/containers/imagesfalse
metadataobjectMetadata contains metadata for custom resourcesfalse
minAvailableint or stringMinimum number of pods that should be available at a time. Defaults to one when the replicas field is greater than one.false
portintegerPort on which PgBouncer should listen for client connections. Changing this value causes PgBouncer to restart.false
priorityClassNamestringPriority class name for the pgBouncer pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/false
replicasintegerNumber of desired PgBouncer pods.false
resourcesobjectCompute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containersfalse
sidecars[]objectCustom sidecars for a PgBouncer pod. Changing this value causes PgBouncer to restart.false
tolerations[]objectTolerations of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-tolerationfalse
topologySpreadConstraints[]objectTopology spread constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity + ↩ Parent +

+ + + +Scheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity + ↩ Parent +

+ + + +Describes node affinity scheduling rules for the pod. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference + ↩ Parent +

+ + + +A node selector term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + ↩ Parent +

+ + + +If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] + ↩ Parent +

+ + + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] + ↩ Parent +

+ + + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity + ↩ Parent +

+ + + +Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity + ↩ Parent +

+ + + +Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm + ↩ Parent +

+ + + +Required. A pod affinity term, associated with the corresponding weight. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] + ↩ Parent +

+ + + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector + ↩ Parent +

+ + + +A label query over a set of resources, in this case pods. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector + ↩ Parent +

+ + + +A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config + ↩ Parent +

+ + + +Configuration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
databasesmap[string]stringPgBouncer database definitions. The key is the database requested by a client while the value is a libpq-styled connection string. The special key "*" acts as a fallback. When this field is empty, PgBouncer is configured with a single "*" entry that connects to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databasesfalse
files[]objectFiles to mount under "/etc/pgbouncer". When specified, settings in the "pgbouncer.ini" file are loaded before all others. From there, other files may be included by absolute path. Changing these references causes PgBouncer to restart, but changes to the file contents are automatically reloaded. More info: https://www.pgbouncer.org/config.html#include-directivefalse
globalmap[string]stringSettings that apply to the entire PgBouncer process. More info: https://www.pgbouncer.org/config.htmlfalse
usersmap[string]stringConnection settings specific to particular users. More info: https://www.pgbouncer.org/config.html#section-usersfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index] + ↩ Parent +

+ + + +Projection that may be projected along with other supported volume types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapobjectconfigMap information about the configMap data to projectfalse
downwardAPIobjectdownwardAPI information about the downwardAPI data to projectfalse
secretobjectsecret information about the secret data to projectfalse
serviceAccountTokenobjectserviceAccountToken is information about the serviceAccountToken data to projectfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].configMap + ↩ Parent +

+ + + +configMap information about the configMap data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional specify whether the ConfigMap or its keys must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].configMap.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI + ↩ Parent +

+ + + +downwardAPI information about the downwardAPI data to project + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectItems is a list of DownwardAPIVolume filefalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index] + ↩ Parent +

+ + + +DownwardAPIVolumeFile represents information to create the file containing the pod field + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'true
fieldRefobjectRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.false
modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].fieldRef + ↩ Parent +

+ + + +Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].resourceFieldRef + ↩ Parent +

+ + + +Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].secret + ↩ Parent +

+ + + +secret information about the secret data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].secret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.config.files[index].serviceAccountToken + ↩ Parent +

+ + + +serviceAccountToken is information about the serviceAccountToken data to project + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pathstringpath is the path relative to the mount point of the file to project the token into.true
audiencestringaudience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.false
expirationSecondsintegerexpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.customTLSSecret + ↩ Parent +

+ + + +A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.customTLSSecret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.expose + ↩ Parent +

+ + + +Specification of the service that exposes PgBouncer. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
nodePortintegerThe port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeportfalse
typeenumMore info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-typesfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.metadata + ↩ Parent +

+ + + +Metadata contains metadata for custom resources + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.resources + ↩ Parent +

+ + + +Compute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index] + ↩ Parent +

+ + + +A single application container that you want to run within a pod. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.true
args[]stringArguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shellfalse
command[]stringEntrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shellfalse
env[]objectList of environment variables to set in the container. Cannot be updated.false
envFrom[]objectList of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.false
imagestringContainer image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.false
imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-imagesfalse
lifecycleobjectActions that the management system should take in response to container lifecycle events. Cannot be updated.false
livenessProbeobjectPeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
ports[]objectList of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.false
readinessProbeobjectPeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
resourcesobjectCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
securityContextobjectSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/false
startupProbeobjectStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.false
stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is falsefalse
terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.false
terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.false
ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.false
volumeDevices[]objectvolumeDevices is the list of block devices to be used by the container.false
volumeMounts[]objectPod volumes to mount into the container's filesystem. Cannot be updated.false
workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index] + ↩ Parent +

+ + + +EnvVar represents an environment variable present in a Container. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the environment variable. Must be a C_IDENTIFIER.true
valuestringVariable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".false
valueFromobjectSource for the environment variable's value. Cannot be used if value is not empty.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index].valueFrom + ↩ Parent +

+ + + +Source for the environment variable's value. Cannot be used if value is not empty. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapKeyRefobjectSelects a key of a ConfigMap.false
fieldRefobjectSelects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.false
secretKeyRefobjectSelects a key of a secret in the pod's namespacefalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index].valueFrom.configMapKeyRef + ↩ Parent +

+ + + +Selects a key of a ConfigMap. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe key to select.true
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the ConfigMap or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index].valueFrom.fieldRef + ↩ Parent +

+ + + +Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index].valueFrom.resourceFieldRef + ↩ Parent +

+ + + +Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].env[index].valueFrom.secretKeyRef + ↩ Parent +

+ + + +Selects a key of a secret in the pod's namespace + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringThe key of the secret to select from. Must be a valid secret key.true
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].envFrom[index] + ↩ Parent +

+ + + +EnvFromSource represents the source of a set of ConfigMaps + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
configMapRefobjectThe ConfigMap to select fromfalse
prefixstringAn optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.false
secretRefobjectThe Secret to select fromfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].envFrom[index].configMapRef + ↩ Parent +

+ + + +The ConfigMap to select from + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the ConfigMap must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].envFrom[index].secretRef + ↩ Parent +

+ + + +The Secret to select from + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the Secret must be definedfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle + ↩ Parent +

+ + + +Actions that the management system should take in response to container lifecycle events. Cannot be updated. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
postStartobjectPostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooksfalse
preStopobjectPreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooksfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.postStart + ↩ Parent +

+ + + +PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
httpGetobjectHTTPGet specifies the http request to perform.false
tcpSocketobjectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.postStart.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.postStart.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.postStart.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.postStart.tcpSocket + ↩ Parent +

+ + + +Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.preStop + ↩ Parent +

+ + + +PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
httpGetobjectHTTPGet specifies the http request to perform.false
tcpSocketobjectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.preStop.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.preStop.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.preStop.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].lifecycle.preStop.tcpSocket + ↩ Parent +

+ + + +Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe + ↩ Parent +

+ + + +Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].livenessProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].ports[index] + ↩ Parent +

+ + + +ContainerPort represents a network port in a single container. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
containerPortintegerNumber of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.true
hostIPstringWhat host IP to bind the external port to.false
hostPortintegerNumber of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.false
namestringIf specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.false
protocolstringProtocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe + ↩ Parent +

+ + + +Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].readinessProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].resources + ↩ Parent +

+ + + +Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].securityContext + ↩ Parent +

+ + + +SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
allowPrivilegeEscalationbooleanAllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.false
capabilitiesobjectThe capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.false
privilegedbooleanRun container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.false
procMountstringprocMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.false
readOnlyRootFilesystembooleanWhether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.false
runAsGroupintegerThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
runAsNonRootbooleanIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
runAsUserintegerThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seLinuxOptionsobjectThe SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.false
seccompProfileobjectThe seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.false
windowsOptionsobjectThe Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].securityContext.capabilities + ↩ Parent +

+ + + +The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
add[]stringAdded capabilitiesfalse
drop[]stringRemoved capabilitiesfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].securityContext.seLinuxOptions + ↩ Parent +

+ + + +The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
levelstringLevel is SELinux level label that applies to the container.false
rolestringRole is a SELinux role label that applies to the container.false
typestringType is a SELinux type label that applies to the container.false
userstringUser is a SELinux user label that applies to the container.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].securityContext.seccompProfile + ↩ Parent +

+ + + +The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typestringtype indicates which kind of seccomp profile will be applied. Valid options are: + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.true
localhostProfilestringlocalhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].securityContext.windowsOptions + ↩ Parent +

+ + + +The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
gmsaCredentialSpecstringGMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.false
gmsaCredentialSpecNamestringGMSACredentialSpecName is the name of the GMSA credential spec to use.false
hostProcessbooleanHostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.false
runAsUserNamestringThe UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe + ↩ Parent +

+ + + +StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe.exec + ↩ Parent +

+ + + +Exec specifies the action to take. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe.grpc + ↩ Parent +

+ + + +GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe.httpGet + ↩ Parent +

+ + + +HTTPGet specifies the http request to perform. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe.httpGet.httpHeaders[index] + ↩ Parent +

+ + + +HTTPHeader describes a custom header to be used in HTTP probes + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe header field nametrue
valuestringThe header field valuetrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].startupProbe.tcpSocket + ↩ Parent +

+ + + +TCPSocket specifies an action involving a TCP port. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
portint or stringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringOptional: Host name to connect to, defaults to the pod IP.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].volumeDevices[index] + ↩ Parent +

+ + + +volumeDevice describes a mapping of a raw block device within a container. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
devicePathstringdevicePath is the path inside of the container that the device will be mapped to.true
namestringname must match the name of a persistentVolumeClaim in the podtrue
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.sidecars[index].volumeMounts[index] + ↩ Parent +

+ + + +VolumeMount describes a mounting of a Volume within a container. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
mountPathstringPath within the container at which the volume should be mounted. Must not contain ':'.true
namestringThis must match the Name of a Volume.true
mountPropagationstringmountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.false
readOnlybooleanMounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.false
subPathstringPath within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).false
subPathExprstringExpanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.tolerations[index] + ↩ Parent +

+ + + +The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index] + ↩ Parent +

+ + + +TopologySpreadConstraint specifies how to spread matching pods among the given topology. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
maxSkewintegerMaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.true
topologyKeystringTopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.true
whenUnsatisfiablestringWhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.true
labelSelectorobjectLabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.false
matchLabelKeys[]stringMatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.false
minDomainsintegerMinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).false
nodeAffinityPolicystringNodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
nodeTaintsPolicystringNodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index].labelSelector + ↩ Parent +

+ + + +LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
+ + +

+ PerconaPGCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index].labelSelector.matchExpressions[index] + ↩ Parent +

+ + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
+ + +

+ PerconaPGCluster.spec.secrets + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
customReplicationTLSSecretobjectThe secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same.false
customTLSSecretobjectThe secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same.false
+ + +

+ PerconaPGCluster.spec.secrets.customReplicationTLSSecret + ↩ Parent +

+ + + +The secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.secrets.customReplicationTLSSecret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.secrets.customTLSSecret + ↩ Parent +

+ + + +The secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
+ + +

+ PerconaPGCluster.spec.secrets.customTLSSecret.items[index] + ↩ Parent +

+ + + +Maps a string key to a path within a volume. + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
+ + +

+ PerconaPGCluster.spec.standby + ↩ Parent +

+ + + +Run this cluster as a read-only copy of an existing cluster or archive. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
enabledbooleanWhether or not the PostgreSQL cluster should be read-only. When this is true, WAL files are applied from a pgBackRest repository or another PostgreSQL server.false
hoststringNetwork address of the PostgreSQL server to follow via streaming replication.false
portintegerNetwork port of the PostgreSQL server to follow via streaming replication.false
repoNamestringThe name of the pgBackRest repository to follow for WAL files.false
+ + +

+ PerconaPGCluster.spec.users[index] + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringThe name of this PostgreSQL user. The value may contain only lowercase letters, numbers, and hyphen so that it fits into Kubernetes metadata.true
databases[]stringDatabases to which this user can connect and create objects. Removing a database from this list does NOT revoke access. This field is ignored for the "postgres" user.false
optionsstringALTER ROLE options except for PASSWORD. This field is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.htmlfalse
passwordobjectProperties of the password generated for this user.false
secretNamestringThe secret name to generate user, password, connection info this PostgreSQL user.false
+ + +

+ PerconaPGCluster.spec.users[index].password + ↩ Parent +

+ + + +Properties of the password generated for this user. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
typeenumType of password to generate. Defaults to ASCII. Valid options are ASCII and AlphaNumeric. "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set.true
+ + +

+ PerconaPGCluster.status + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgbouncerobjecttrue
postgresobjecttrue
statestringtrue
hoststringfalse
+ + +

+ PerconaPGCluster.status.pgbouncer + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
readyintegerfalse
sizeintegerfalse
+ + +

+ PerconaPGCluster.status.postgres + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
instances[]objectfalse
readyintegerfalse
sizeintegerfalse
+ + +

+ PerconaPGCluster.status.postgres.instances[index] + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringtrue
readyintegerfalse
sizeintegerfalse
+ +

PerconaPGRestore

+ + + + + + +PerconaPGRestore is the CRD that defines a Percona PostgreSQL Restore + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringpgv2.percona.com/v2true
kindstringPerconaPGRestoretrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobjecttrue
statusobjectfalse
+ + +

+ PerconaPGRestore.spec + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
pgClusterstringThe name of the PerconaPGCluster to perform restore.true
repoNamestringThe name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster.true
options[]stringCommand line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restorefalse
+ + +

+ PerconaPGRestore.status + ↩ Parent +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
completedstringfalse
jobNamestringfalse
statestringfalse
+

postgres-operator.crunchydata.com/v1beta1

Resource Types: @@ -1416,6 +20357,13 @@ Resource requirements for the PGUpgrade container. + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -1429,6 +20377,33 @@ Resource requirements for the PGUpgrade container. +

+ PGUpgrade.spec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PGUpgrade.spec.tolerations[index] ↩ Parent @@ -1515,7 +20490,8 @@ PGUpgradeStatus defines the observed state of PGUpgrade -Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // other fields } @@ -2098,12 +21074,12 @@ Defines a PersistentVolumeClaim spec used to create and/or bind a volume - + - + @@ -2152,6 +21128,13 @@ resources represents the minimum resources the volume should have. If RecoverVol + + + + + @@ -2161,6 +21144,33 @@ resources represents the minimum resources the volume should have. If RecoverVol
dataSource objectdataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false
dataSourceRef objectdataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. false
selectormap[string]int or string Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ true
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limits map[string]int or string
+

+ PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSource ↩ Parent @@ -2168,7 +21178,7 @@ resources represents the minimum resources the volume should have. If RecoverVol -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -2205,7 +21215,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. 
(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -2231,6 +21241,11 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
@@ -3923,6 +22938,13 @@ Resource limits for backup jobs. Includes manual, scheduled and replica create b + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -3936,6 +22958,33 @@ Resource limits for backup jobs. Includes manual, scheduled and replica create b +

+ PostgresCluster.spec.backups.pgbackrest.jobs.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.backups.pgbackrest.jobs.tolerations[index] ↩ Parent @@ -5311,6 +24360,13 @@ Resource requirements for a pgBackRest repository host + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -5324,6 +24380,33 @@ Resource requirements for a pgBackRest repository host +

+ PostgresCluster.spec.backups.pgbackrest.repoHost.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.backups.pgbackrest.repoHost.sshConfigMap ↩ Parent @@ -5545,7 +24628,7 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t topologyKey string - TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. true whenUnsatisfiable @@ -5557,12 +24640,29 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t object LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. false + + matchLabelKeys + []string + MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + false minDomains integer MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + false + + nodeAffinityPolicy + string + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + false + + nodeTaintsPolicy + string + NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. false @@ -6911,6 +26011,13 @@ Resource requirements for the pgBackRest restore Job. + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -6924,6 +26031,33 @@ Resource requirements for the pgBackRest restore Job. +

+ PostgresCluster.spec.backups.pgbackrest.restore.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.backups.pgbackrest.restore.tolerations[index] ↩ Parent @@ -7030,14 +26164,53 @@ Defines the configuration for the pgBackRest sidecar container -

- PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources - ↩ Parent +

+ PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources + ↩ Parent +

+ + + +Resource requirements for a sidecar container + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
+ + +

+ PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources.claims[index] + ↩ Parent

-Resource requirements for a sidecar container +ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -7049,15 +26222,10 @@ Resource requirements for a sidecar container - - - - - - - - - + + + +
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/falsenamestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
@@ -7108,6 +26276,13 @@ Resource requirements for a sidecar container + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -7121,6 +26296,33 @@ Resource requirements for a sidecar container +

+ PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index] ↩ Parent @@ -7244,12 +26446,12 @@ Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernet dataSource object - dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false dataSourceRef object - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
false selector @@ -7298,6 +26500,13 @@ resources represents the minimum resources the volume should have. If RecoverVol map[string]int or string Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ true + + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false limits map[string]int or string @@ -7307,6 +26516,33 @@ resources represents the minimum resources the volume should have. If RecoverVol +

+ PostgresCluster.spec.instances[index].dataVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].dataVolumeClaimSpec.dataSource ↩ Parent @@ -7314,7 +26550,7 @@ resources represents the minimum resources the volume should have. If RecoverVol -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -7351,7 +26587,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. 
(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -7377,6 +26613,11 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
@@ -8704,7 +27945,7 @@ A single application container that you want to run within a pod. ports []object - List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. false readinessProbe @@ -9156,7 +28397,7 @@ PostStart is called immediately after a container is created. If the handler fai tcpSocket object - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false @@ -9275,7 +28516,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -9331,7 +28572,7 @@ PreStop is called immediately before a container is terminated due to an API req - +
tcpSocket objectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
@@ -9450,7 +28691,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -9531,7 +28772,7 @@ Periodic probe of container liveness. Container will be restarted if the probe f - + @@ -9693,7 +28934,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
tcpSocket objectTCPSocket specifies an action involving a TCP port.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
terminationGracePeriodSeconds
@@ -9821,7 +29062,7 @@ Periodic probe of container service readiness. Container will be removed from se - + @@ -9983,7 +29224,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
tcpSocket objectTCPSocket specifies an action involving a TCP port.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
terminationGracePeriodSeconds
@@ -10027,6 +29268,13 @@ Compute Resources required by this container. Cannot be updated. More info: http + + + + + @@ -10040,6 +29288,33 @@ Compute Resources required by this container. Cannot be updated. More info: http
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+

+ PostgresCluster.spec.instances[index].containers[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].containers[index].securityContext ↩ Parent @@ -10321,7 +29596,7 @@ StartupProbe indicates that the Pod has successfully initialized. If specified, tcpSocket object - TCPSocket specifies an action involving a TCP port. + TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false terminationGracePeriodSeconds @@ -10483,7 +29758,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -10643,6 +29918,13 @@ Compute resources of a PostgreSQL container. + + + + + @@ -10656,6 +29938,33 @@ Compute resources of a PostgreSQL container.
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+

+ PostgresCluster.spec.instances[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].sidecars ↩ Parent @@ -10729,6 +30038,13 @@ Resource requirements for a sidecar container + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -10742,6 +30058,33 @@ Resource requirements for a sidecar container +

+ PostgresCluster.spec.instances[index].sidecars.replicaCertCopy.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].tablespaceVolumes[index] ↩ Parent @@ -10800,12 +30143,12 @@ Defines a PersistentVolumeClaim for a tablespace. More info: https://kubernetes. dataSource object - dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false dataSourceRef object - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. 
(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. false resources @@ -10843,7 +30186,7 @@ Defines a PersistentVolumeClaim for a tablespace. More info: https://kubernetes. -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -10880,7 +30223,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. 
* While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -10906,6 +30249,11 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
@@ -10929,6 +30277,13 @@ resources represents the minimum resources the volume should have. If RecoverVol + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -10942,6 +30297,33 @@ resources represents the minimum resources the volume should have. If RecoverVol +

+ PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.selector ↩ Parent @@ -11084,7 +30466,7 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t topologyKey string - TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. true whenUnsatisfiable @@ -11096,12 +30478,29 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t object LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. false + + matchLabelKeys + []string + MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + false minDomains integer MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + false + + nodeAffinityPolicy + string + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + false + + nodeTaintsPolicy + string + NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. false @@ -11207,12 +30606,12 @@ Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More dataSource object - dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false dataSourceRef object - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. false selector @@ -11261,6 +30660,13 @@ resources represents the minimum resources the volume should have. If RecoverVol map[string]int or string Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ true + + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false limits map[string]int or string @@ -11270,6 +30676,33 @@ resources represents the minimum resources the volume should have. If RecoverVol +

+ PostgresCluster.spec.instances[index].walVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.instances[index].walVolumeClaimSpec.dataSource ↩ Parent @@ -11277,7 +30710,7 @@ resources represents the minimum resources the volume should have. If RecoverVol -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -11314,7 +30747,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. 
(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -11340,6 +30773,11 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
@@ -12290,12 +31728,12 @@ Defines a PersistentVolumeClaim spec used to create and/or bind a volume dataSource object - dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false dataSourceRef object - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
false resources @@ -12333,7 +31771,7 @@ Defines a PersistentVolumeClaim spec used to create and/or bind a volume -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -12370,7 +31808,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -12396,18 +31834,62 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
+ + +

+ PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources + ↩ Parent +

+ + + +resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
-

- PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources - ↩ Parent +

+ PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources.claims[index] + ↩ Parent

-resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -12419,15 +31901,10 @@ resources represents the minimum resources the volume should have. If RecoverVol - - - - - - - - - + + + +
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/falsenamestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
@@ -14073,6 +33550,13 @@ Resource requirements for the pgBackRest restore Job. + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -14086,6 +33570,33 @@ Resource requirements for the pgBackRest restore Job. +

+ PostgresCluster.spec.dataSource.pgbackrest.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.dataSource.pgbackrest.tolerations[index] ↩ Parent @@ -15402,6 +34913,13 @@ Resource requirements for the pgBackRest restore Job. + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -15415,6 +34933,33 @@ Resource requirements for the pgBackRest restore Job. +

+ PostgresCluster.spec.dataSource.postgresCluster.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.dataSource.postgresCluster.tolerations[index] ↩ Parent @@ -16173,7 +35718,7 @@ Projected secret containing custom TLS certificates to encrypt output from the e name string - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? false optional @@ -16240,6 +35785,13 @@ Changing this value causes PostgreSQL and the exporter to restart. More info: ht + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -16253,6 +35805,33 @@ Changing this value causes PostgreSQL and the exporter to restart. More info: ht +

+ PostgresCluster.spec.monitoring.pgmonitor.exporter.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.patroni ↩ Parent @@ -16402,6 +35981,11 @@ Defines a PgBouncer proxy and connection pooler. object A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths false + + exposeSuperusers + boolean + Allow SUPERUSERs to connect through PGBouncer. + false image string @@ -18122,7 +37706,7 @@ A single application container that you want to run within a pod. ports []object - List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. false readinessProbe @@ -18574,7 +38158,7 @@ PostStart is called immediately after a container is created. If the handler fai tcpSocket object - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported false @@ -18693,7 +38277,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -18749,7 +38333,7 @@ PreStop is called immediately before a container is terminated due to an API req - +
tcpSocket objectDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
@@ -18868,7 +38452,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -18949,7 +38533,7 @@ Periodic probe of container liveness. Container will be restarted if the probe f - + @@ -19111,7 +38695,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
tcpSocket objectTCPSocket specifies an action involving a TCP port.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
terminationGracePeriodSeconds
@@ -19239,7 +38823,7 @@ Periodic probe of container service readiness. Container will be removed from se - + @@ -19401,7 +38985,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
tcpSocket objectTCPSocket specifies an action involving a TCP port.TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false
terminationGracePeriodSeconds
@@ -19445,6 +39029,13 @@ Compute Resources required by this container. Cannot be updated. More info: http + + + + + @@ -19458,6 +39049,33 @@ Compute Resources required by this container. Cannot be updated. More info: http
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+

+ PostgresCluster.spec.proxy.pgBouncer.containers[index].resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext ↩ Parent @@ -19739,7 +39357,7 @@ StartupProbe indicates that the Pod has successfully initialized. If specified, tcpSocket object - TCPSocket specifies an action involving a TCP port. + TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported false terminationGracePeriodSeconds @@ -19901,7 +39519,7 @@ HTTPHeader describes a custom header to be used in HTTP probes -TCPSocket specifies an action involving a TCP port. +TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported @@ -20135,6 +39753,13 @@ Compute resources of a PgBouncer container. Changing this value causes PgBouncer + + + + + @@ -20148,6 +39773,33 @@ Compute resources of a PgBouncer container. Changing this value causes PgBouncer
claims[]objectClaims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers.false
limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+

+ PostgresCluster.spec.proxy.pgBouncer.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.proxy.pgBouncer.service ↩ Parent @@ -20290,6 +39942,13 @@ Resource requirements for a sidecar container + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -20303,6 +39962,33 @@ Resource requirements for a sidecar container +

+ PostgresCluster.spec.proxy.pgBouncer.sidecars.pgbouncerConfig.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.proxy.pgBouncer.tolerations[index] ↩ Parent @@ -20376,7 +40062,7 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t topologyKey string - TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. true whenUnsatisfiable @@ -20388,12 +40074,29 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t object LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. false + + matchLabelKeys + []string + MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + false minDomains integer MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + false + + nodeAffinityPolicy + string + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + false + + nodeTaintsPolicy + string + NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. false @@ -20709,12 +40412,12 @@ Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes. dataSource object - dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. false dataSourceRef object - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. false resources @@ -20752,7 +40455,7 @@ Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes. -dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. +dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. @@ -20789,7 +40492,7 @@ dataSource field can be used to specify either: * An existing VolumeSnapshot obj -dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
@@ -20815,6 +40518,11 @@ dataSourceRef specifies the object from which to populate the volume with data, + + + + +
string APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. false
namespacestringNamespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.false
@@ -20838,6 +40546,13 @@ resources represents the minimum resources the volume should have. If RecoverVol + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -20851,6 +40566,33 @@ resources represents the minimum resources the volume should have. If RecoverVol +

+ PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.selector ↩ Parent @@ -22598,6 +42340,13 @@ Compute resources of a pgAdmin container. Changing this value causes pgAdmin to + claims + []object + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + This field is immutable. It can only be set for containers. + false + limits map[string]int or string Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -22611,6 +42360,33 @@ Compute resources of a pgAdmin container. Changing this value causes pgAdmin to +

+ PostgresCluster.spec.userInterface.pgAdmin.resources.claims[index] + ↩ Parent +

+ + + +ResourceClaim references one entry in PodSpec.ResourceClaims. + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
namestringName must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.true
+ +

PostgresCluster.spec.userInterface.pgAdmin.service ↩ Parent @@ -22753,7 +42529,7 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t topologyKey string - TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. true whenUnsatisfiable @@ -22765,12 +42541,29 @@ TopologySpreadConstraint specifies how to spread matching pods among the given t object LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. false + + matchLabelKeys + []string + MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + false minDomains integer MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + false + + nodeAffinityPolicy + string + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + false + + nodeTaintsPolicy + string + NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. false @@ -22883,6 +42676,11 @@ A label selector requirement is a selector that contains values, a key, and an o object Properties of the password generated for this user. false + + secretName + string + The secret name to generate user, password, connection info this PostgreSQL user. 
+ false diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml index 75c023d39..02272f290 100644 --- a/examples/postgrescluster/postgrescluster.yaml +++ b/examples/postgrescluster/postgrescluster.yaml @@ -3,8 +3,8 @@ kind: PostgresCluster metadata: name: hippo spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.5-1 - postgresVersion: 14 + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.3-2 + postgresVersion: 15 instances: - name: instance1 dataVolumeClaimSpec: @@ -15,7 +15,7 @@ spec: storage: 1Gi backups: pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.40-1 + image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.45-2 repos: - name: repo1 volume: @@ -35,4 +35,4 @@ spec: storage: 1Gi proxy: pgBouncer: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.17-1 + image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-2 diff --git a/go.mod b/go.mod index 85182ad1e..402dc565f 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,6 @@ require ( sigs.k8s.io/yaml v1.3.0 ) -require github.com/rogpeppe/go-internal v1.6.1 // indirect - require ( github.com/Percona-Lab/percona-version-service v0.0.0-20230404081016-ea25e30cdcbc github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -84,6 +82,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect diff --git a/hack/update-pgmonitor-installer.sh b/hack/update-pgmonitor-installer.sh index b74652643..088bc2908 100755 --- a/hack/update-pgmonitor-installer.sh +++ 
b/hack/update-pgmonitor-installer.sh @@ -19,7 +19,7 @@ directory=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) # The pgMonitor tag to use to refresh the current monitoring installer -pgmonitor_tag=v4.8.0 +pgmonitor_tag=v4.8.1 # Set the directory for the monitoring Kustomize installer pgo_examples_monitoring_dir="${directory}/../../postgres-operator-examples/kustomize/monitoring" diff --git a/installers/olm/Makefile b/installers/olm/Makefile index b0751e03c..af853c438 100644 --- a/installers/olm/Makefile +++ b/installers/olm/Makefile @@ -126,4 +126,4 @@ validate-%-image: build-bundle-images: $(distros:%=build-%-image) build-%-image: - ./build-image.sh '$(CONTAINER)' 'bundles/$*' '$*' '$(PGO_VERSION)' \ No newline at end of file + ./build-image.sh '$(CONTAINER)' 'bundles/$*' '$*' '$(PGO_VERSION)' diff --git a/installers/olm/bundle.annotations.yaml b/installers/olm/bundle.annotations.yaml index 74144a3be..8f4dde50d 100644 --- a/installers/olm/bundle.annotations.yaml +++ b/installers/olm/bundle.annotations.yaml @@ -33,6 +33,6 @@ annotations: # https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory com.redhat.delivery.operator.bundle: true - com.redhat.openshift.versions: 'v4.9' + com.redhat.openshift.versions: 'v4.10' ... 
diff --git a/installers/olm/bundle.csv.yaml b/installers/olm/bundle.csv.yaml index 06cc2c5d7..aec698563 100644 --- a/installers/olm/bundle.csv.yaml +++ b/installers/olm/bundle.csv.yaml @@ -57,7 +57,7 @@ spec: # https://olm.operatorframework.io/docs/best-practices/common/ # Note: The minKubeVersion must correspond to the lowest supported OCP version - minKubeVersion: 1.21.0 + minKubeVersion: 1.23.0 maturity: stable # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/how-to-update-operators.md#replaces--channels replaces: '' # generate.sh diff --git a/installers/olm/bundle.relatedImages.yaml b/installers/olm/bundle.relatedImages.yaml new file mode 100644 index 000000000..3824b27b2 --- /dev/null +++ b/installers/olm/bundle.relatedImages.yaml @@ -0,0 +1,25 @@ + relatedImages: + - name: PGADMIN + image: registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256: + - name: PGBACKREST + image: registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256: + - name: PGBOUNCER + image: registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256: + - name: PGEXPORTER + image: registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256: + - name: PGUPGRADE + image: registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256: + - name: POSTGRES_14 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: + - name: POSTGRES_15 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: + - name: POSTGRES_14_GIS_3.1 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: + - name: POSTGRES_14_GIS_3.2 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: + - name: POSTGRES_14_GIS_3.3 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: + - name: POSTGRES_15_GIS_3.3 + image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: + - name: postgres-operator + image: 
registry.connect.redhat.com/crunchydata/postgres-operator@sha256: diff --git a/installers/olm/config/examples/kustomization.yaml b/installers/olm/config/examples/kustomization.yaml index b01a39610..6d6125735 100644 --- a/installers/olm/config/examples/kustomization.yaml +++ b/installers/olm/config/examples/kustomization.yaml @@ -15,3 +15,4 @@ kind: Kustomization resources: - postgrescluster.example.yaml +#- pgupgrade.example.yaml diff --git a/installers/olm/config/examples/pgupgrade.example.yaml b/installers/olm/config/examples/pgupgrade.example.yaml index 2b369211f..ad4f45310 100644 --- a/installers/olm/config/examples/pgupgrade.example.yaml +++ b/installers/olm/config/examples/pgupgrade.example.yaml @@ -4,5 +4,5 @@ metadata: name: example-upgrade spec: postgresClusterName: example - fromPostgresVersion: 13 - toPostgresVersion: 14 + fromPostgresVersion: 14 + toPostgresVersion: 15 diff --git a/installers/olm/config/examples/postgrescluster.example.yaml b/installers/olm/config/examples/postgrescluster.example.yaml index 11dc47979..63bd9abca 100644 --- a/installers/olm/config/examples/postgrescluster.example.yaml +++ b/installers/olm/config/examples/postgrescluster.example.yaml @@ -39,4 +39,4 @@ spec: enabled: false image: percona/pmm-client:2.32.0 secret: example-pmm-secret - serverHost: monitoring-service \ No newline at end of file + serverHost: monitoring-service diff --git a/installers/olm/config/redhat/kustomization.yaml b/installers/olm/config/redhat/kustomization.yaml index 8f753fa7a..a34c7b484 100644 --- a/installers/olm/config/redhat/kustomization.yaml +++ b/installers/olm/config/redhat/kustomization.yaml @@ -3,4 +3,4 @@ kind: Kustomization resources: - ../operator -- ../examples \ No newline at end of file +- ../examples diff --git a/installers/olm/description.md b/installers/olm/description.md index 735063e8b..2dca52419 100644 --- a/installers/olm/description.md +++ b/installers/olm/description.md @@ -91,4 +91,4 @@ Create, Scale, & Delete PostgreSQL 
clusters with ease, while fully customizing y * Customize your PostgreSQL configuration * Bring your own trusted certificate authority (CA) for use with the Operator API server * Override your PostgreSQL configuration for each cluster - * Use your own custom images, re-define the image for each container separately \ No newline at end of file + * Use your own custom images, re-define the image for each container separately diff --git a/internal/bridge/client.go b/internal/bridge/client.go index daf2c54c3..01d006dea 100644 --- a/internal/bridge/client.go +++ b/internal/bridge/client.go @@ -19,6 +19,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -32,6 +33,8 @@ import ( const defaultAPI = "https://api.crunchybridge.com" +var errAuthentication = errors.New("authentication failed") + type Client struct { http.Client wait.Backoff @@ -179,6 +182,38 @@ func (c *Client) doWithRetry( return response, err } +func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthObject, error) { + var result AuthObject + + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/auth-objects", nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + authn.Secret}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + // 401, Unauthorized + case response.StatusCode == 401: + err = fmt.Errorf("%w: %s", errAuthentication, body) + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { var result Installation @@ -188,20 +223,18 @@ func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { if err == nil { defer response.Body.Close() - - var body bytes.Buffer - _, _ = io.Copy(&body, response.Body) + body, _ := io.ReadAll(response.Body) switch { // 2xx, Successful - case 200 <= response.StatusCode && response.StatusCode < 300: - if err = json.Unmarshal(body.Bytes(), &result); err != nil { - err = fmt.Errorf("%w: %v", err, body.String()) + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) } default: //nolint:goerr113 // This is intentionally dynamic. - err = fmt.Errorf("%v: %v", response.Status, body.String()) + err = fmt.Errorf("%v: %s", response.Status, body) } } diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go index fc212ecba..729cf521d 100644 --- a/internal/bridge/client_test.go +++ b/internal/bridge/client_test.go @@ -405,6 +405,87 @@ func TestClientDoWithRetry(t *testing.T) { }) } +func TestClientCreateAuthObject(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + assert.Equal(t, len(body), 0) + requests = append(requests, *r) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + _, _ = client.CreateAuthObject(ctx, AuthObject{Secret: "sesame"}) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer sesame") + }) + + t.Run("Unauthorized", func(t *testing.T) { + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`some info`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "authentication") + assert.ErrorContains(t, err, "some info") + assert.ErrorIs(t, err, errAuthentication) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`some message`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "404 Not Found") + assert.ErrorContains(t, err, "some message") + }) + + t.Run("NoResponseBody", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "unexpected end") + assert.ErrorContains(t, err, "JSON") + }) + + t.Run("ResponseNotJSON", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`asdf`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "asdf") + }) +} + 
func TestClientCreateInstallation(t *testing.T) { t.Run("ErrorResponse", func(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go index 76394438e..ab8ace874 100644 --- a/internal/bridge/installation.go +++ b/internal/bridge/installation.go @@ -18,12 +18,15 @@ package bridge import ( "context" "encoding/json" + "errors" "sync" "time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" corev1apply "k8s.io/client-go/applyconfigurations/core/v1" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -59,12 +62,15 @@ type Installation struct { type InstallationReconciler struct { Owner client.FieldOwner Reader interface { - Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + Get(context.Context, types.NamespacedName, client.Object, ...client.GetOption) error } Writer interface { Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error } + // Refresh is the frequency at which AuthObjects should be renewed. + Refresh time.Duration + // SecretRef is the name of the corev1.Secret in which to store Bridge tokens. SecretRef client.ObjectKey @@ -79,6 +85,7 @@ func ManagedInstallationReconciler(m manager.Manager, newClient func() *Client) Owner: naming.ControllerBridge, Reader: kubernetes, Writer: kubernetes, + Refresh: 2 * time.Hour, SecretRef: naming.AsObjectKey(naming.OperatorConfigurationSecret()), NewClient: newClient, } @@ -119,7 +126,7 @@ func (r *InstallationReconciler) Reconcile( // make it so. 
secret.Namespace, secret.Name = request.Namespace, request.Name - err = r.reconcile(ctx, secret) + result.RequeueAfter, err = r.reconcile(ctx, secret) } // TODO: Check for corev1.NamespaceTerminatingCause after @@ -134,10 +141,14 @@ func (r *InstallationReconciler) Reconcile( return result, err } -func (r *InstallationReconciler) reconcile(ctx context.Context, read *corev1.Secret) error { +// reconcile looks for an Installation in read and stores it or another in +// the [self] singleton after a successful response from the Bridge API. +func (r *InstallationReconciler) reconcile( + ctx context.Context, read *corev1.Secret) (next time.Duration, err error, +) { write, err := corev1apply.ExtractSecret(read, string(r.Owner)) if err != nil { - return err + return 0, err } // We GET-extract-PATCH the Secret and do not build it up from scratch. @@ -157,24 +168,30 @@ func (r *InstallationReconciler) reconcile(ctx context.Context, read *corev1.Sec // Secret which triggers another reconcile. if len(installation.ID) == 0 { if len(self.ID) == 0 { - return r.register(ctx, write) + return 0, r.register(ctx, write) } data := map[string][]byte{} data[KeyBridgeToken], _ = json.Marshal(self.Installation) //nolint:errchkjson - return r.persist(ctx, write.WithData(data)) + return 0, r.persist(ctx, write.WithData(data)) } - // When the Secret has an Installation, store it in memory. - // TODO: Validate it first; perhaps refresh the AuthObject. - if len(self.ID) == 0 { - self.Lock() - self.Installation = installation - self.Unlock() + // Read the timestamp from the Secret, if any. + var touched time.Time + if yaml.Unmarshal(read.Data[KeyBridgeLocalTime], &touched) != nil { + touched = time.Time{} + } + + // Refresh the AuthObject when there is no Installation in memory, + // there is no timestamp, or the timestamp is far away. This writes to + // the Secret which triggers another reconcile. 
+ if len(self.ID) == 0 || time.Since(touched) > r.Refresh || time.Until(touched) > r.Refresh { + return 0, r.refresh(ctx, installation, write) } - return nil + // Trigger another reconcile one interval after the stored timestamp. + return wait.Jitter(time.Until(touched.Add(r.Refresh)), 0.1), nil } // persist uses Server-Side Apply to write config to Kubernetes. The Name and @@ -198,6 +215,52 @@ func (r *InstallationReconciler) persist( return err } +// refresh calls the Bridge API to refresh the AuthObject of installation. It +// combines the result with installation and stores that in the [self] singleton +// and the write object in Kubernetes. The Name and Namespace fields of the +// latter cannot be nil. +func (r *InstallationReconciler) refresh( + ctx context.Context, installation Installation, + write *corev1apply.SecretApplyConfiguration, +) error { + result, err := r.NewClient().CreateAuthObject(ctx, installation.AuthObject) + + // An authentication error means the installation is irrecoverably expired. + // Remove it from the singleton and move it to a dated entry in the Secret. + if err != nil && errors.Is(err, errAuthentication) { + self.Lock() + self.Installation = Installation{} + self.Unlock() + + keyExpiration := KeyBridgeToken + + installation.AuthObject.ExpiresAt.UTC().Format("--2006-01-02") + + data := make(map[string][]byte, 2) + data[KeyBridgeToken] = nil + data[keyExpiration], _ = json.Marshal(installation) //nolint:errchkjson + + return r.persist(ctx, write.WithData(data)) + } + + if err == nil { + installation.AuthObject = result + + // Store the new value in the singleton. + self.Lock() + self.Installation = installation + self.Unlock() + + // Store the new value in the Secret along with the current time. 
+ data := make(map[string][]byte, 2) + data[KeyBridgeLocalTime], _ = metav1.Now().MarshalJSON() + data[KeyBridgeToken], _ = json.Marshal(installation) //nolint:errchkjson + + err = r.persist(ctx, write.WithData(data)) + } + + return err +} + // register calls the Bridge API to register a new Installation. It stores the // result in the [self] singleton and the write object in Kubernetes. The Name // and Namespace fields of the latter cannot be nil. diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go index 40390d2f2..6c2d6a7fd 100644 --- a/internal/bridge/installation_test.go +++ b/internal/bridge/installation_test.go @@ -22,8 +22,10 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "gotest.tools/v3/assert" + cmpopt "gotest.tools/v3/assert/opt" corev1 "k8s.io/api/core/v1" corev1apply "k8s.io/client-go/applyconfigurations/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -127,8 +129,9 @@ func TestInstallationReconcile(t *testing.T) { } ctx := context.Background() - err := reconciler.reconcile(ctx, secret) + next, err := reconciler.reconcile(ctx, secret) assert.NilError(t, err) + assert.Assert(t, next == 0) // It calls the API. assert.Equal(t, len(requests), 1) @@ -178,7 +181,7 @@ func TestInstallationReconcile(t *testing.T) { } ctx := context.Background() - err := reconciler.reconcile(ctx, secret) + _, err := reconciler.reconcile(ctx, secret) assert.Equal(t, err, expected, "expected a Kubernetes error") // It stores the API result in memory. 
@@ -227,8 +230,9 @@ func TestInstallationReconcile(t *testing.T) { } ctx := context.Background() - err := reconciler.reconcile(ctx, secret) + next, err := reconciler.reconcile(ctx, secret) assert.NilError(t, err) + assert.Assert(t, next == 0) assert.Equal(t, self.ID, "asdf", "expected no change to memory") @@ -254,7 +258,7 @@ func TestInstallationReconcile(t *testing.T) { } ctx := context.Background() - err := reconciler.reconcile(ctx, secret) + _, err := reconciler.reconcile(ctx, secret) assert.Equal(t, err, expected, "expected a Kubernetes error") assert.Equal(t, self.ID, "asdf", "expected no change to memory") }) @@ -262,7 +266,7 @@ func TestInstallationReconcile(t *testing.T) { // Scenario: // When there is a Secret but no Installation in memory, - // Then Reconcile should store it in memory. + // Then Reconcile should verify it in the API and store it in memory. // t.Run("Restart", func(t *testing.T) { var reconciler *InstallationReconciler @@ -271,18 +275,228 @@ func TestInstallationReconcile(t *testing.T) { beforeEach := func() { reconciler = new(InstallationReconciler) secret = new(corev1.Secret) - secret.Data = map[string][]byte{KeyBridgeToken: []byte(`{"id":"xyz"}`)} + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{ + "id":"xyz", "auth_object":{ + "secret":"abc", + "expires_at":"2020-10-28T05:06:07Z" + } + }`), + } self.Installation = Installation{} } - t.Run("ItLoads", func(t *testing.T) { + t.Run("ItVerifies", func(t *testing.T) { beforeEach() + // API double; spy on requests. 
+ var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "def"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + ctx := context.Background() - err := reconciler.reconcile(ctx, secret) + next, err := reconciler.reconcile(ctx, secret) assert.NilError(t, err) + assert.Assert(t, next == 0) + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer abc") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. assert.Equal(t, self.ID, "xyz") + assert.Equal(t, self.AuthObject.Secret, "def") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"def"`)) + }) + + t.Run("Expired", func(t *testing.T) { + beforeEach() + + // API double; authentication error. 
+ { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.DeepEqual(t, self.Installation, Installation{}) + + // It archives the expired one. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Equal(t, len(decoded.Data["bridge-token"]), 0) + + archived := string(decoded.Data["bridge-token--2020-10-28"]) + assert.Assert(t, cmp.Contains(archived, `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(archived, `"secret":"abc"`)) + }) + }) + + // Scenario: + // When there is an Installation in the Secret and in memory, + // Then Reconcile should refresh it periodically. 
+ // + t.Run("Refresh", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func(timestamp []byte) { + reconciler = new(InstallationReconciler) + reconciler.Refresh = time.Minute + + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{"id":"ddd", "auth_object":{"secret":"eee"}}`), + KeyBridgeLocalTime: timestamp, + } + + self.Installation = Installation{ID: "ddd"} + } + + for _, tt := range []struct { + Name string + Timestamp []byte + }{ + {Name: "NoTimestamp", Timestamp: nil}, + {Name: "BadTimestamp", Timestamp: []byte(`asdf`)}, + {Name: "OldTimestamp", Timestamp: []byte(`"2020-10-10T20:20:20Z"`)}, + {Name: "FutureTimestamp", Timestamp: []byte(`"2030-10-10T20:20:20Z"`)}, + } { + t.Run(tt.Name, func(t *testing.T) { + beforeEach(tt.Timestamp) + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "fresh"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. 
+ var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer eee") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. + assert.Equal(t, self.ID, "ddd") + assert.Equal(t, self.AuthObject.Secret, "fresh") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"ddd"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"fresh"`)) + }) + } + + t.Run("CurrentTimestamp", func(t *testing.T) { + current := time.Now().Add(-15 * time.Minute) + currentJSON, _ := current.UTC().MarshalJSON() + + beforeEach(currentJSON) + reconciler.Refresh = time.Hour + + // Any API calls would panic because no spies are configured here. + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + + // The next reconcile is scheduled around (60 - 15 =) 45 minutes + // from now, plus or minus (60 * 10% =) 6 minutes of jitter. 
+ assert.DeepEqual(t, next, 45*time.Minute, + cmpopt.DurationWithThreshold(6*time.Minute)) }) }) } diff --git a/internal/config/config.go b/internal/config/config.go index 109147c7c..70b071f30 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -98,3 +98,43 @@ func PostgresContainerImage(cluster *v1beta1.PostgresCluster) string { func PGONamespace() string { return os.Getenv("PGO_NAMESPACE") } + +// VerifyImageValues checks that all container images required by the +// spec are defined. If any are undefined, a list is returned in an error. +func VerifyImageValues(cluster *v1beta1.PostgresCluster) error { + + var images []string + + if PGBackRestContainerImage(cluster) == "" { + images = append(images, "crunchy-pgbackrest") + } + if PGAdminContainerImage(cluster) == "" && + cluster.Spec.UserInterface != nil && + cluster.Spec.UserInterface.PGAdmin != nil { + images = append(images, "crunchy-pgadmin4") + } + if PGBouncerContainerImage(cluster) == "" && + cluster.Spec.Proxy != nil && + cluster.Spec.Proxy.PGBouncer != nil { + images = append(images, "crunchy-pgbouncer") + } + if PGExporterContainerImage(cluster) == "" && + cluster.Spec.Monitoring != nil && + cluster.Spec.Monitoring.PGMonitor != nil && + cluster.Spec.Monitoring.PGMonitor.Exporter != nil { + images = append(images, "crunchy-postgres-exporter") + } + if PostgresContainerImage(cluster) == "" { + if cluster.Spec.PostGISVersion != "" { + images = append(images, "crunchy-postgres-gis") + } else { + images = append(images, "crunchy-postgres") + } + } + + if len(images) > 0 { + return fmt.Errorf("Missing image(s): %s", images) + } + + return nil +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 58dc45dd5..72a19d80e 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -145,3 +145,56 @@ func TestPostgresContainerImage(t *testing.T) { cluster.Spec.Image = "spec-image" assert.Equal(t, PostgresContainerImage(cluster), 
"spec-image") } + +func TestVerifyImageValues(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + + verifyImageCheck := func(t *testing.T, envVar, errString string, cluster *v1beta1.PostgresCluster) { + unsetEnv(t, envVar) + err := VerifyImageValues(cluster) + assert.ErrorContains(t, err, errString) + } + + t.Run("crunchy-postgres", func(t *testing.T) { + cluster.Spec.PostgresVersion = 14 + verifyImageCheck(t, "RELATED_IMAGE_POSTGRES_14", "crunchy-postgres", cluster) + }) + + t.Run("crunchy-postgres-gis", func(t *testing.T) { + cluster.Spec.PostGISVersion = "3.3" + verifyImageCheck(t, "RELATED_IMAGE_POSTGRES_14_GIS_3.3", "crunchy-postgres-gis", cluster) + }) + + t.Run("crunchy-pgbackrest", func(t *testing.T) { + verifyImageCheck(t, "RELATED_IMAGE_PGBACKREST", "crunchy-pgbackrest", cluster) + }) + + t.Run("crunchy-pgbouncer", func(t *testing.T) { + cluster.Spec.Proxy = new(v1beta1.PostgresProxySpec) + cluster.Spec.Proxy.PGBouncer = new(v1beta1.PGBouncerPodSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGBOUNCER", "crunchy-pgbouncer", cluster) + }) + + t.Run("crunchy-pgadmin4", func(t *testing.T) { + cluster.Spec.UserInterface = new(v1beta1.UserInterfaceSpec) + cluster.Spec.UserInterface.PGAdmin = new(v1beta1.PGAdminPodSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGADMIN", "crunchy-pgadmin4", cluster) + }) + + t.Run("crunchy-postgres-exporter", func(t *testing.T) { + cluster.Spec.Monitoring = new(v1beta1.MonitoringSpec) + cluster.Spec.Monitoring.PGMonitor = new(v1beta1.PGMonitorSpec) + cluster.Spec.Monitoring.PGMonitor.Exporter = new(v1beta1.ExporterSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGEXPORTER", "crunchy-postgres-exporter", cluster) + }) + + t.Run("multiple images", func(t *testing.T) { + err := VerifyImageValues(cluster) + assert.ErrorContains(t, err, "crunchy-postgres-gis") + assert.ErrorContains(t, err, "crunchy-pgbackrest") + assert.ErrorContains(t, err, "crunchy-pgbouncer") + assert.ErrorContains(t, err, "crunchy-pgadmin4") + assert.ErrorContains(t, 
err, "crunchy-postgres-exporter") + }) + +} diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index a30bae045..fe7a56846 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -314,6 +314,15 @@ func pgUpgradeContainerImage(upgrade *v1beta1.PGUpgrade) string { return defaultFromEnv(image, "RELATED_IMAGE_PGUPGRADE") } +// verifyUpgradeImageValue checks that the upgrade container image required by the +// spec is defined. If it is undefined, an error is returned. +func verifyUpgradeImageValue(upgrade *v1beta1.PGUpgrade) error { + if pgUpgradeContainerImage(upgrade) == "" { + return fmt.Errorf("Missing crunchy-upgrade image") + } + return nil +} + // jobFailed returns "true" if the Job provided has failed. Otherwise it returns "false". func jobFailed(job *batchv1.Job) bool { conditions := job.Status.Conditions diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 06558cbac..ab7f430b1 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -16,6 +16,7 @@ package pgupgrade import ( "context" + "os" "strings" "testing" @@ -264,3 +265,57 @@ spec: status: {} `)) } + +// saveEnv preserves environment variables so that any modifications needed for +// the tests can be undone once completed. 
+func saveEnv(t testing.TB, key string) { + t.Helper() + previous, ok := os.LookupEnv(key) + t.Cleanup(func() { + if ok { + os.Setenv(key, previous) + } else { + os.Unsetenv(key) + } + }) +} + +func setEnv(t testing.TB, key, value string) { + t.Helper() + saveEnv(t, key) + assert.NilError(t, os.Setenv(key, value)) +} + +func unsetEnv(t testing.TB, key string) { + t.Helper() + saveEnv(t, key) + assert.NilError(t, os.Unsetenv(key)) +} + +func TestPGUpgradeContainerImage(t *testing.T) { + upgrade := &v1beta1.PGUpgrade{} + + unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "") + + setEnv(t, "RELATED_IMAGE_PGUPGRADE", "") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "") + + setEnv(t, "RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "env-var-pgbackrest") + + assert.NilError(t, yaml.Unmarshal( + []byte(`{ image: spec-image }`), &upgrade.Spec)) + assert.Equal(t, pgUpgradeContainerImage(upgrade), "spec-image") +} + +func TestVerifyUpgradeImageValue(t *testing.T) { + upgrade := &v1beta1.PGUpgrade{} + + t.Run("crunchy-postgres", func(t *testing.T) { + unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + err := verifyUpgradeImageValue(upgrade) + assert.ErrorContains(t, err, "crunchy-upgrade") + }) + +} diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 53de82573..24a90aed3 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -197,6 +197,19 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } + if err = verifyUpgradeImageValue(upgrade); err != nil { + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.GetGeneration(), + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeInvalid", + Message: 
fmt.Sprintf("Error: %s", err), + }) + + return ctrl.Result{}, nil + } + setStatusToProgressingIfReasonWas("PGUpgradeInvalid", upgrade) // Observations and cluster validation @@ -317,7 +330,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // // Requiring the cluster be shutdown also provides some assurance that the // user understands downtime requirement of upgrading - if !world.ClusterShutdown || world.ClusterPrimary == nil { + if !world.ClusterShutdown { meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ ObservedGeneration: upgrade.Generation, Type: ConditionPGUpgradeProgressing, @@ -331,6 +344,22 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( setStatusToProgressingIfReasonWas("PGClusterNotShutdown", upgrade) + // A separate check for primary identification allows for cases where the + // PostgresCluster may not have been initialized properly. + if world.ClusterPrimary == nil { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterPrimaryNotIdentified", + Message: "PostgresCluster primary instance not identified", + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGClusterPrimaryNotIdentified", upgrade) + if version != int64(upgrade.Spec.FromPostgresVersion) && statusVersion != int64(upgrade.Spec.ToPostgresVersion) { meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index 7251b0e1b..9d5ea54f9 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -79,8 +79,19 @@ func TestServerSideApply(t *testing.T) { after := constructor() assert.NilError(t, cc.Patch(ctx, after, client.Apply, reconciler.Owner)) 
assert.Assert(t, after.GetResourceVersion() != "") - assert.Assert(t, after.GetResourceVersion() != before.GetResourceVersion(), - "expected https://github.com/kubernetes-sigs/controller-runtime/issues/1356") + + switch { + // TODO(tjmoore4): The update currently impacts 1.28+ only, but may be + // backpatched in the future. + // - https://github.com/kubernetes/kubernetes/pull/116865 + case serverVersion.LessThan(version.MustParseGeneric("1.28")): + + assert.Assert(t, after.GetResourceVersion() != before.GetResourceVersion(), + "expected https://issue.k8s.io/116861") + + default: + assert.Assert(t, after.GetResourceVersion() == before.GetResourceVersion()) + } // Our apply method generates the correct apply-patch. again := constructor() diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 47a71affc..acd0d813e 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -238,13 +238,13 @@ func (r *Reconciler) generateClusterReplicaService( // replica instances. func (r *Reconciler) reconcileClusterReplicaService( ctx context.Context, cluster *v1beta1.PostgresCluster, -) error { +) (*corev1.Service, error) { service, err := r.generateClusterReplicaService(cluster) if err == nil { err = errors.WithStack(r.apply(ctx, service)) } - return err + return service, err } // reconcileDataSource is responsible for reconciling the data source for a PostgreSQL cluster. 
diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 84edd7ddc..2ced2f2e0 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + "github.com/percona/percona-postgresql-operator/internal/config" "github.com/percona/percona-postgresql-operator/internal/logging" "github.com/percona/percona-postgresql-operator/internal/pgaudit" "github.com/percona/percona-postgresql-operator/internal/pgbackrest" @@ -147,6 +148,20 @@ func (r *Reconciler) Reconcile( // Perform initial validation on a cluster // TODO: Move this to a defaulting (mutating admission) webhook // to leverage regular validation. + + // verify all needed image values are defined + if err := config.VerifyImageValues(cluster); err != nil { + // warning event with missing image information + r.Recorder.Event(cluster, corev1.EventTypeWarning, "MissingRequiredImage", + err.Error()) + // specifically allow reconciliation if the cluster is shutdown to + // facilitate upgrades, otherwise return + if cluster.Spec.Shutdown == nil || + (cluster.Spec.Shutdown != nil && !*cluster.Spec.Shutdown) { + return result, err + } + } + if cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled && cluster.Spec.Standby.Host == "" && @@ -171,8 +186,10 @@ func (r *Reconciler) Reconcile( patroniLeaderService *corev1.Service primaryCertificate *corev1.SecretProjection primaryService *corev1.Service + replicaService *corev1.Service rootCA *pki.RootCertificateAuthority monitoringSecret *corev1.Secret + exporterQueriesConfig *corev1.ConfigMap exporterWebConfig *corev1.ConfigMap err error ) @@ -298,10 +315,10 @@ func (r *Reconciler) Reconcile( primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService) } if err == nil { - err = 
r.reconcileClusterReplicaService(ctx, cluster) + replicaService, err = r.reconcileClusterReplicaService(ctx, cluster) } if err == nil { - primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService) + primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService) } if err == nil { err = r.reconcilePatroniDistributedConfiguration(ctx, cluster) @@ -312,14 +329,17 @@ func (r *Reconciler) Reconcile( if err == nil { monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster) } + if err == nil { + exporterQueriesConfig, err = r.reconcileExporterQueriesConfig(ctx, cluster) + } if err == nil { exporterWebConfig, err = r.reconcileExporterWebConfig(ctx, cluster) } if err == nil { err = r.reconcileInstanceSets( - ctx, cluster, clusterConfigMap, clusterReplicationSecret, - rootCA, clusterPodService, instanceServiceAccount, instances, - patroniLeaderService, primaryCertificate, clusterVolumes, exporterWebConfig) + ctx, cluster, clusterConfigMap, clusterReplicationSecret, rootCA, + clusterPodService, instanceServiceAccount, instances, patroniLeaderService, + primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig) } if err == nil { diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 460325529..7f802f96d 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -2,7 +2,7 @@ // +build envtest /* - Copyright 2021 - 2022 Crunchy Data Solutions, Inc. + Copyright 2021 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -173,6 +173,7 @@ metadata: name: carlos spec: postgresVersion: 13 + image: postgres instances: - name: samba dataVolumeClaimSpec: @@ -183,6 +184,7 @@ spec: storage: 1Gi backups: pgbackrest: + image: pgbackrest repos: - name: repo1 volume: @@ -376,6 +378,7 @@ metadata: name: carlos spec: postgresVersion: 13 + image: postgres instances: - name: samba dataVolumeClaimSpec: @@ -386,6 +389,7 @@ spec: storage: 1Gi backups: pgbackrest: + image: pgbackrest repos: - name: repo1 volume: diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index b4c4336d5..914852db3 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -502,7 +502,7 @@ func (r *Reconciler) reconcileInstanceSets( patroniLeaderService *corev1.Service, primaryCertificate *corev1.SecretProjection, clusterVolumes []corev1.PersistentVolumeClaim, - exporterWebConfig *corev1.ConfigMap, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, ) error { // Go through the observed instances and check if a primary has been determined. 
@@ -539,7 +539,7 @@ func (r *Reconciler) reconcileInstanceSets( rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, findAvailableInstanceNames(*set, instances, clusterVolumes), - numInstancePods, clusterVolumes, exporterWebConfig) + numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig) if err == nil { err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, set) @@ -977,7 +977,7 @@ func (r *Reconciler) scaleUpInstances( availableInstanceNames []string, numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, - exporterWebConfig *corev1.ConfigMap, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, ) ([]*appsv1.StatefulSet, error) { log := logging.FromContext(ctx) @@ -1021,7 +1021,7 @@ func (r *Reconciler) scaleUpInstances( clusterConfigMap, clusterReplicationSecret, rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, instances[i], - numInstancePods, clusterVolumes, exporterWebConfig, + numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig, ) } if err == nil { @@ -1050,7 +1050,7 @@ func (r *Reconciler) reconcileInstance( instance *appsv1.StatefulSet, numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, - exporterWebConfig *corev1.ConfigMap, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, ) error { log := logging.FromContext(ctx).WithValues("instance", instance.Name) ctx = logging.NewContext(ctx, log) @@ -1107,7 +1107,7 @@ func (r *Reconciler) reconcileInstance( // Add pgMonitor resources to the instance Pod spec if err == nil { - err = addPGMonitorToInstancePodSpec(cluster, &instance.Spec.Template, exporterWebConfig) + err = addPGMonitorToInstancePodSpec(cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig) } // add nss_wrapper init container and add nss_wrapper env vars to the database and pgbackrest diff --git a/internal/controller/postgrescluster/instance_test.go 
b/internal/controller/postgrescluster/instance_test.go index ce0bbf4aa..7e83eebf7 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1165,9 +1165,13 @@ func TestDeleteInstance(t *testing.T) { assert.NilError(t, reconciler.deleteInstance(ctx, cluster, instanceName)) gvks := []schema.GroupVersionKind{ - corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"), + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete PVCs if custom resource is deleted. + //corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"), corev1.SchemeGroupVersion.WithKind("ConfigMap"), - corev1.SchemeGroupVersion.WithKind("Secret"), + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete secrets if custom resource is deleted. + //corev1.SchemeGroupVersion.WithKind("Secret"), appsv1.SchemeGroupVersion.WithKind("StatefulSet"), } @@ -1496,6 +1500,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { `)) }, }} { + test := test t.Run(test.name, func(t *testing.T) { cluster := test.ip.cluster diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 92f01e3ef..caa5199ea 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -154,7 +154,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v // applying the PostgresCluster controller's fully specified intent for the PersistentVolumeClaim // representing a repository. 
func (r *Reconciler) applyRepoVolumeIntent(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster, spec *corev1.PersistentVolumeClaimSpec, + postgresCluster *v1beta1.PostgresCluster, spec corev1.PersistentVolumeClaimSpec, repoName string, repoResources *RepoResources) (*corev1.PersistentVolumeClaim, error) { repo, err := r.generateRepoVolumeIntent(postgresCluster, spec, repoName, repoResources) @@ -622,7 +622,7 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu } func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresCluster, - spec *corev1.PersistentVolumeClaimSpec, repoName string, + spec corev1.PersistentVolumeClaimSpec, repoName string, repoResources *RepoResources) (*corev1.PersistentVolumeClaim, error) { annotations := naming.Merge( @@ -655,7 +655,7 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC Kind: "PersistentVolumeClaim", }, ObjectMeta: meta, - Spec: *spec, + Spec: spec, } // K8SPG-328: Keep this commented in case of conflicts. @@ -1093,9 +1093,16 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, } } + // Check to see if huge pages have been requested in the spec. If they have, include 'huge_pages = try' + // in the restore command. If they haven't, include 'huge_pages = off'. + hugePagesSetting := "off" + if postgres.HugePagesRequested(cluster) { + hugePagesSetting = "try" + } + // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. 
- cmd := pgbackrest.RestoreCommand(pgdata, pgtablespaceVolumes, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, pgtablespaceVolumes, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() @@ -2501,7 +2508,7 @@ func (r *Reconciler) reconcileRepos(ctx context.Context, if repo.Volume == nil { continue } - repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, &repo.Volume.VolumeClaimSpec, + repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, repo.Volume.VolumeClaimSpec, repo.Name, repoResources) if err != nil { log.Error(err, errMsg) diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 22a2aaf4c..0e99e289f 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -19,6 +19,8 @@ import ( "context" "fmt" "io" + "os" + "strings" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -51,6 +53,14 @@ const ( exporterHost = "localhost" ) +// Defaults for certain values used in queries.yml +// TODO(dsessler7): make these values configurable via spec +var defaultValuesForQueries = map[string]string{ + "PGBACKREST_INFO_THROTTLE_MINUTES": "10", + "PG_STAT_STATEMENTS_LIMIT": "20", + "PG_STAT_STATEMENTS_THROTTLE_MINUTES": "-1", +} + // If pgMonitor is enabled the pgMonitor sidecar(s) have been added to the // instance pod. 
reconcilePGMonitor will update the database to // create the necessary objects for the tool to run @@ -64,7 +74,6 @@ func (r *Reconciler) reconcilePGMonitor(ctx context.Context, } // reconcilePGMonitorExporter performs setup the postgres_exporter sidecar -// - PodExec to get setup.sql file for the postgres version // - PodExec to run the sql in the primary database // Status.Monitoring.ExporterConfiguration is used to determine when the // pgMonitor postgres_exporter configuration should be added/changed to @@ -79,7 +88,6 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, writablePod *corev1.Pod setup string pgImageSHA string - exporterImageSHA string ) // Find the PostgreSQL instance that can execute SQL that writes to every @@ -90,30 +98,33 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, } // For the writableInstance found above - // 1) make sure the `exporter` container is running - // 2) get and save the imageIDs for the `exporter` and `database` containers, and - // 3) exit early if we can't get the ImageID of either of those containers. - // We use these ImageIDs in the hash we make to see if the operator needs to rerun + // 1) get and save the imageIDs for `database` container, and + // 2) exit early if we can't get the ImageID of this container. + // We use this ImageID and the setup.sql file in the hash we make to see if the operator needs to rerun // the `EnableExporterInPostgreSQL` funcs; that way we are always running // that function against an updated and running pod. 
if pgmonitor.ExporterEnabled(cluster) { - running, known := writableInstance.IsRunning(naming.ContainerPGMonitorExporter) - if !running || !known { - // Exporter container needs to be available to get setup.sql; - return nil + sql, err := os.ReadFile(fmt.Sprintf("%s/pg%d/setup.sql", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + if err != nil { + return err } + // TODO: Revisit how pgbackrest_info.sh is used with pgMonitor. + // pgMonitor queries expect a path to a script that runs pgBackRest + // info and provides json output. In the queries yaml for pgBackRest + // the default path is `/usr/bin/pgbackrest-info.sh`. We update + // the path to point to the script in our database image. + setup = strings.ReplaceAll(string(sql), "/usr/bin/pgbackrest-info.sh", + "/opt/crunchy/bin/postgres/pgbackrest_info.sh") + for _, containerStatus := range writablePod.Status.ContainerStatuses { - if containerStatus.Name == naming.ContainerPGMonitorExporter { - exporterImageSHA = containerStatus.ImageID - } if containerStatus.Name == naming.ContainerDatabase { pgImageSHA = containerStatus.ImageID } } - // Could not get container imageIDs - if exporterImageSHA == "" || pgImageSHA == "" { + // Could not get container imageID + if pgImageSHA == "" { return nil } } @@ -140,7 +151,7 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, _, err := io.Copy(hasher, stdin) if err == nil { // Use command and image tag in hash to execute hash on image update - _, err = fmt.Fprint(hasher, command, pgImageSHA, exporterImageSHA) + _, err = fmt.Fprint(hasher, command, pgImageSHA, setup) } return err }) @@ -155,13 +166,6 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, // Include the revision hash in any log messages. 
ctx := logging.NewContext(ctx, logging.FromContext(ctx).WithValues("revision", revision)) - if pgmonitor.ExporterEnabled(cluster) { - exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(writablePod.Namespace, writablePod.Name, naming.ContainerPGMonitorExporter, stdin, stdout, stderr, command...) - } - setup, _, err = pgmonitor.Executor(exec).GetExporterSetupSQL(ctx, cluster.Spec.PostgresVersion) - } - // Apply the necessary SQL and record its hash in cluster.Status if err == nil { err = action(ctx, func(_ context.Context, stdin io.Reader, @@ -254,9 +258,9 @@ func (r *Reconciler) reconcileMonitoringSecret( func addPGMonitorToInstancePodSpec( cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, - exporterWebConfig *corev1.ConfigMap) error { + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { - err := addPGMonitorExporterToInstancePodSpec(cluster, template, exporterWebConfig) + err := addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, exporterWebConfig) return err } @@ -269,12 +273,17 @@ func addPGMonitorToInstancePodSpec( func addPGMonitorExporterToInstancePodSpec( cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, - exporterWebConfig *corev1.ConfigMap) error { + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { if !pgmonitor.ExporterEnabled(cluster) { return nil } + var ( + exporterExtendQueryPathFlag = "--extend.query-path=/opt/crunchy/conf/queries.yml" + exporterWebListenAddressFlag = fmt.Sprintf("--web.listen-address=:%d", exporterPort) + ) + securityContext := initialize.RestrictedSecurityContext() exporterContainer := corev1.Container{ Name: naming.ContainerPGMonitorExporter, @@ -282,19 +291,12 @@ func addPGMonitorExporterToInstancePodSpec( ImagePullPolicy: cluster.Spec.ImagePullPolicy, Resources: cluster.Spec.Monitoring.PGMonitor.Exporter.Resources, Command: []string{ - "/opt/cpm/bin/start.sh", + 
"postgres_exporter", exporterExtendQueryPathFlag, exporterWebListenAddressFlag, }, Env: []corev1.EnvVar{ - {Name: "CONFIG_DIR", Value: "/opt/cpm/conf"}, - {Name: "POSTGRES_EXPORTER_PORT", Value: fmt.Sprint(exporterPort)}, - {Name: "PGBACKREST_INFO_THROTTLE_MINUTES", Value: "10"}, - {Name: "PG_STAT_STATEMENTS_LIMIT", Value: "20"}, - {Name: "PG_STAT_STATEMENTS_THROTTLE_MINUTES", Value: "-1"}, - {Name: "EXPORTER_PG_HOST", Value: exporterHost}, - {Name: "EXPORTER_PG_PORT", Value: fmt.Sprint(*cluster.Spec.Port)}, - {Name: "EXPORTER_PG_DATABASE", Value: exporterDB}, - {Name: "EXPORTER_PG_USER", Value: pgmonitor.MonitoringUser}, - {Name: "EXPORTER_PG_PASSWORD", ValueFrom: &corev1.EnvVarSource{ + {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("%s:%d/%s", exporterHost, *cluster.Spec.Port, exporterDB)}, + {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, + {Name: "DATA_SOURCE_PASS", ValueFrom: &corev1.EnvVarSource{ // Environment variables are not updated after a secret update. // This could lead to a state where the exporter does not have // the correct password and the container needs to restart. 
@@ -320,6 +322,10 @@ func addPGMonitorExporterToInstancePodSpec( Name: "exporter-config", // this is the path for custom config as defined in the start.sh script for the exporter container MountPath: "/conf", + }, { + Name: "exporter-queries", + // this is the path for the default config, which we generate and then mount via ConfigMap + MountPath: "/opt/crunchy/conf", }}, } @@ -336,6 +342,15 @@ func addPGMonitorExporterToInstancePodSpec( } template.Spec.Volumes = append(template.Spec.Volumes, configVolume) + // add default exporter queries config volume + queriesVolume := corev1.Volume{Name: "exporter-queries"} + queriesVolume.ConfigMap = &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: exporterQueriesConfig.Name, + }, + } + template.Spec.Volumes = append(template.Spec.Volumes, queriesVolume) + if cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret != nil { configureExporterTLS(cluster, template, exporterWebConfig) } @@ -399,11 +414,7 @@ func configureExporterTLS(cluster *v1beta1.PostgresCluster, template *corev1.Pod }} exporterContainer.VolumeMounts = append(exporterContainer.VolumeMounts, mounts...) 
- exporterContainer.Env = append(exporterContainer.Env, corev1.EnvVar{ - // TODO (jmckulk): define path not dir - Name: "WEB_CONFIG_DIR", - Value: "web-config/", - }) + exporterContainer.Command = append(exporterContainer.Command, "--web.config.file=/web-config/web-config.yml") } } @@ -463,3 +474,108 @@ tls_server_config: return nil, err } + +func (r *Reconciler) reconcileExporterQueriesConfig(ctx context.Context, + cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) { + + existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterQueriesConfigMap(cluster)} + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + + if !pgmonitor.ExporterEnabled(cluster) { + // We could still have a NotFound error here so check the err. + // If no error that means the configmap is found and needs to be deleted + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) + } + return nil, client.IgnoreNotFound(err) + } + + intent := &corev1.ConfigMap{ + ObjectMeta: naming.ExporterQueriesConfigMap(cluster), + Data: map[string]string{"queries.yml": generateQueries(ctx, cluster)}, + } + + intent.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + ) + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleMonitoring, + }) + + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + err = errors.WithStack(r.setControllerReference(cluster, intent)) + if err == nil { + err = errors.WithStack(r.apply(ctx, intent)) + } + if err == nil { + return intent, nil + } + + return nil, err +} + +func generateQueries(ctx context.Context, cluster *v1beta1.PostgresCluster) string { + log := logging.FromContext(ctx) + var queries string + baseQueries := []string{"backrest", "global", "per_db", "nodemx"} + + // TODO: When 
we add pgbouncer support we will do something like the following: + // if pgbouncerEnabled() { + // baseQueries = append(baseQueries, "pgbouncer") + // } + + for _, queryType := range baseQueries { + queriesContents, err := os.ReadFile(fmt.Sprintf("%s/queries_%s.yml", pgmonitor.GetQueriesConfigDir(ctx), queryType)) + if err != nil { + // log an error, but continue to next iteration + log.Error(err, fmt.Sprintf("Query file queries_%s.yml does not exist (it should)...", queryType)) + continue + } + queries += string(queriesContents) + "\n" + } + + // Add general queries for specific postgres version + queriesGeneral, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_general.yml", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_general.yml does not exist (it should)...", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesGeneral) + "\n" + } + + // Add pg_stat_statement queries for specific postgres version + queriesPgStatStatements, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_pg_stat_statements.yml", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_pg_stat_statements.yml not loaded.", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesPgStatStatements) + "\n" + } + + // If postgres version >= 12, add pg_stat_statements_reset queries + if cluster.Spec.PostgresVersion >= 12 { + queriesPgStatStatementsReset, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_pg_stat_statements_reset_info.yml", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_pg_stat_statements_reset_info.yml not loaded.", 
pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesPgStatStatementsReset) + "\n" + } + } + + // Find and replace default values in queries + for k, v := range defaultValuesForQueries { + queries = strings.ReplaceAll(queries, fmt.Sprintf("#%s#", k), v) + } + + // TODO: Add ability to exclude certain user-specified queries + + return queries +} diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index 9b332c8f2..95b6afb4f 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -42,6 +42,11 @@ import ( "github.com/percona/percona-postgresql-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +var ( + exporterExtendQueryPathFlag = "--extend.query-path=/opt/crunchy/conf/queries.yml" + exporterWebListenAddressFlag = fmt.Sprintf("--web.listen-address=:%d", 9187) +) + func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { image := "test/image:tag" @@ -67,7 +72,7 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { t.Run("ExporterDisabled", func(t *testing.T) { template := &corev1.PodTemplateSpec{} - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, nil, nil)) assert.DeepEqual(t, template, &corev1.PodTemplateSpec{}) }) @@ -87,12 +92,15 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, nil)) + exporterQueriesConfig := &corev1.ConfigMap{ + ObjectMeta: naming.ExporterQueriesConfigMap(cluster), + } + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) assert.Equal(t, container.Image, image) 
assert.Equal(t, container.ImagePullPolicy, corev1.PullAlways) assert.DeepEqual(t, container.Resources, resources) - assert.DeepEqual(t, container.Command, []string{"/opt/cpm/bin/start.sh"}) + assert.DeepEqual(t, container.Command, []string{"postgres_exporter", exporterExtendQueryPathFlag, exporterWebListenAddressFlag}) assert.DeepEqual(t, container.SecurityContext.Capabilities, &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, }) @@ -102,16 +110,9 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { assert.Equal(t, *container.Resources.Requests.Cpu(), resource.MustParse("100m")) expectedENV := []corev1.EnvVar{ - {Name: "CONFIG_DIR", Value: "/opt/cpm/conf"}, - {Name: "POSTGRES_EXPORTER_PORT", Value: "9187"}, - {Name: "PGBACKREST_INFO_THROTTLE_MINUTES", Value: "10"}, - {Name: "PG_STAT_STATEMENTS_LIMIT", Value: "20"}, - {Name: "PG_STAT_STATEMENTS_THROTTLE_MINUTES", Value: "-1"}, - {Name: "EXPORTER_PG_HOST", Value: "localhost"}, - {Name: "EXPORTER_PG_PORT", Value: fmt.Sprint(*cluster.Spec.Port)}, - {Name: "EXPORTER_PG_DATABASE", Value: "postgres"}, - {Name: "EXPORTER_PG_USER", Value: pgmonitor.MonitoringUser}, - {Name: "EXPORTER_PG_PASSWORD", ValueFrom: &corev1.EnvVarSource{ + {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("localhost:%d/postgres", *cluster.Spec.Port)}, + {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, + {Name: "DATA_SOURCE_PASS", ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: naming.MonitoringUserSecret(cluster).Name, @@ -121,11 +122,39 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}} assert.DeepEqual(t, container.Env, expectedENV) - assert.Assert(t, container.Ports[0].ContainerPort == int32(9187)) - assert.Assert(t, container.Ports[0].Name == "exporter") - assert.Assert(t, container.Ports[0].Protocol == "TCP") + assert.Assert(t, container.Ports[0].ContainerPort == int32(9187), "Exporter container port number not set to 
'9187'.") + assert.Assert(t, container.Ports[0].Name == "exporter", "Exporter container port name not set to 'exporter'.") + assert.Assert(t, container.Ports[0].Protocol == "TCP", "Exporter container port protocol not set to 'TCP'.") - assert.Assert(t, template.Spec.Volumes != nil) + assert.Assert(t, template.Spec.Volumes != nil, "No volumes were found.") + + var foundDefaultQueriesVolume bool + for _, v := range template.Spec.Volumes { + if v.Name == "exporter-queries" { + assert.DeepEqual(t, v, corev1.Volume{ + Name: "exporter-queries", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: exporterQueriesConfig.Name, + }, + }, + }, + }) + foundDefaultQueriesVolume = true + break + } + } + assert.Assert(t, foundDefaultQueriesVolume, "The default 'exporter-queries' volume was not found.") + + var foundDefaultQueriesMount bool + for _, vm := range container.VolumeMounts { + if vm.Name == "exporter-queries" && vm.MountPath == "/opt/crunchy/conf" { + foundDefaultQueriesMount = true + break + } + } + assert.Assert(t, foundDefaultQueriesMount, "The default 'exporter-queries' volume mount was not found.") }) t.Run("CustomConfig", func(t *testing.T) { @@ -150,8 +179,11 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } + exporterQueriesConfig := &corev1.ConfigMap{ + ObjectMeta: naming.ExporterQueriesConfigMap(cluster), + } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) var foundConfigVolume bool for _, v := range template.Spec.Volumes { @@ -168,7 +200,7 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { break } } - assert.Assert(t, foundConfigVolume) + assert.Assert(t, foundConfigVolume, "The 'exporter-config' volume was not found.") container := getContainerWithName(template.Spec.Containers, 
naming.ContainerPGMonitorExporter) var foundConfigMount bool @@ -178,7 +210,7 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { break } } - assert.Assert(t, foundConfigMount) + assert.Assert(t, foundConfigMount, "The 'exporter-config' volume mount was not found.") }) } @@ -348,6 +380,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { } cluster := &v1beta1.PostgresCluster{} + cluster.Spec.PostgresVersion = 15 cluster.Spec.Monitoring = test.monitoring cluster.Status.Monitoring.ExporterConfiguration = test.status.ExporterConfiguration observed := &observedInstances{forCluster: test.instances} @@ -400,8 +433,8 @@ func TestReconcilePGMonitorExporter(t *testing.T) { called = false assert.NilError(t, reconciler.reconcilePGMonitorExporter(ctx, cluster, observed, nil)) - assert.Assert(t, called) - assert.Assert(t, cluster.Status.Monitoring.ExporterConfiguration != "") + assert.Assert(t, called, "PodExec was not called.") + assert.Assert(t, cluster.Status.Monitoring.ExporterConfiguration != "", "ExporterConfiguration was empty.") }) } @@ -441,7 +474,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { podExecCalled: false, // Status was generated manually for this test case // TODO jmckulk: add code to generate status - status: v1beta1.MonitoringStatus{ExporterConfiguration: "74476b9895"}, + status: v1beta1.MonitoringStatus{ExporterConfiguration: "79b86d7d69"}, statusChangedAfterReconcile: false, }} { t.Run(test.name, func(t *testing.T) { @@ -462,6 +495,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { // Create the test cluster spec with the exporter status set cluster := &v1beta1.PostgresCluster{} + cluster.Spec.PostgresVersion = 15 cluster.Status.Monitoring.ExporterConfiguration = test.status.ExporterConfiguration // Mock up an instances that will be defined in the cluster. 
The instances should @@ -555,7 +589,7 @@ func TestReconcileMonitoringSecret(t *testing.T) { t.Run("NotExisting", func(t *testing.T) { secret, err := reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, secret == nil) + assert.Assert(t, secret == nil, "Monitoring secret was not nil.") }) t.Run("Existing", func(t *testing.T) { @@ -569,7 +603,7 @@ func TestReconcileMonitoringSecret(t *testing.T) { cluster.Spec.Monitoring = nil actual, err := reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, actual == nil) + assert.Assert(t, actual == nil, "Monitoring secret still exists after turning exporter off.") }) }) @@ -591,35 +625,34 @@ func TestReconcileMonitoringSecret(t *testing.T) { t.Run("NotExisting", func(t *testing.T) { existing, err = reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, existing != nil) + assert.Assert(t, existing != nil, "Monitoring secret does not exist.") }) t.Run("Existing", func(t *testing.T) { actual, err = reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, bytes.Equal(actual.Data["password"], existing.Data["password"])) + assert.Assert(t, bytes.Equal(actual.Data["password"], existing.Data["password"]), "Passwords do not match.") }) }) } // TestConfigureExporterTLS checks that tls settings are configured on a podTemplate. // When exporter is enabled with custom tls configureExporterTLS should add volumes, -// volumeMounts, and an envVar to the template. Ensure that existing template configurations -// are still present -func TestConfigreExporterTLS(t *testing.T) { +// volumeMounts, and a flag to the Command. Ensure that existing template configurations +// are still present. 
+func TestConfigureExporterTLS(t *testing.T) { // Define an existing template with values that could be overwritten baseTemplate := &corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: naming.ContainerPGMonitorExporter, + Command: []string{ + "postgres_exporter", exporterExtendQueryPathFlag, exporterWebListenAddressFlag, + }, VolumeMounts: []corev1.VolumeMount{{ Name: "existing-volume", MountPath: "some-path", }}, - Env: []corev1.EnvVar{{ - Name: "existing-env", - Value: "existing-value", - }}, }}, Volumes: []corev1.Volume{{ Name: "existing-volume", @@ -693,10 +726,11 @@ func TestConfigreExporterTLS(t *testing.T) { - configMap: name: test-exporter-web-config name: web-config - `)) + `), "Volumes are not what they should be.") // Is the exporter container in position 0? - assert.Assert(t, template.Spec.Containers[0].Name == naming.ContainerPGMonitorExporter) + assert.Assert(t, template.Spec.Containers[0].Name == naming.ContainerPGMonitorExporter, + "Exporter container is not in the zeroth position.") // Did we configure the volume mounts on the container while leaving existing // mounts in place? @@ -707,15 +741,100 @@ func TestConfigreExporterTLS(t *testing.T) { name: exporter-certs - mountPath: /web-config name: web-config - `)) - - // Did we set the `WEB_CONFIG_DIR` env var on the container while leaving - // existing vars in place? - assert.Assert(t, marshalMatches(template.Spec.Containers[0].Env, ` -- name: existing-env - value: existing-value -- name: WEB_CONFIG_DIR - value: web-config/ - `)) + `), "Volume mounts are not what they should be.") + + // Did we add the "--web.config.file" flag to the command while leaving the + // rest intact? 
+ assert.DeepEqual(t, template.Spec.Containers[0].Command, []string{"postgres_exporter", + exporterExtendQueryPathFlag, exporterWebListenAddressFlag, "--web.config.file=/web-config/web-config.yml"}) + }) +} + +// TestReconcileExporterQueriesConfig checks that the ConfigMap intent returned by +// reconcileExporterQueriesConfig is correct. If exporter is enabled, the return +// shouldn't be nil. If the exporter is disabled, the return should be nil. +func TestReconcileExporterQueriesConfig(t *testing.T) { + ctx := context.Background() + + // Kubernetes is required because reconcileExporterQueriesConfig + // (1) uses the client to get existing ConfigMaps + // (2) sets the controller reference on the new ConfigMap + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + + cluster := testCluster() + cluster.Default() + cluster.UID = types.UID("hippouid") + cluster.Namespace = setupNamespace(t, cc).Name + + t.Run("ExporterDisabled", func(t *testing.T) { + t.Run("NotExisting", func(t *testing.T) { + queriesConfig, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, queriesConfig == nil, "Default queries ConfigMap is present.") + }) + + t.Run("Existing", func(t *testing.T) { + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{Image: "image"}}} + existing, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err, "error in test; existing config not created") + assert.Assert(t, existing != nil, "error in test; existing config not created") + + cluster.Spec.Monitoring = nil + actual, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, actual == nil, "Default queries config still present after disabling exporter.") + }) + }) + + t.Run("ExporterEnabled", func(t *testing.T) { + var ( 
+ existing, actual *corev1.ConfigMap + err error + ) + + // Enable monitoring in the test cluster spec + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{ + Image: "image", + }, + }, + } + + t.Run("NotExisting", func(t *testing.T) { + existing, err = reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, existing != nil, "Default queries config does not exist.") + }) + + t.Run("Existing", func(t *testing.T) { + actual, err = reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, actual.Data["queries.yml"] == existing.Data["queries.yml"], "Data does not align.") + }) + }) +} + +func TestGenerateQueries(t *testing.T) { + ctx := context.Background() + cluster := &v1beta1.PostgresCluster{} + + t.Run("PG<=11", func(t *testing.T) { + cluster.Spec.PostgresVersion = 11 + queries := generateQueries(ctx, cluster) + assert.Assert(t, !strings.Contains(queries, "ccp_pg_stat_statements_reset"), + "Queries contain 'ccp_pg_stat_statements_reset' query when they should not.") + }) + + t.Run("PG>=12", func(t *testing.T) { + cluster.Spec.PostgresVersion = 12 + queries := generateQueries(ctx, cluster) + assert.Assert(t, strings.Contains(queries, "ccp_pg_stat_statements_reset"), + "Queries do not contain 'ccp_pg_stat_statements_reset' query when they should.") }) } diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index 328e59312..8722d2c66 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -118,6 +118,7 @@ func (r *Reconciler) reconcileRootCertificate( func (r *Reconciler) reconcileClusterCertificate( ctx context.Context, root *pki.RootCertificateAuthority, cluster *v1beta1.PostgresCluster, primaryService *corev1.Service, + replicaService *corev1.Service, ) ( *corev1.SecretProjection, error, ) { @@ -133,7 +134,7 @@ 
func (r *Reconciler) reconcileClusterCertificate( r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := &pki.LeafCertificate{} - dnsNames := naming.ServiceDNSNames(ctx, primaryService) + dnsNames := append(naming.ServiceDNSNames(ctx, primaryService), naming.ServiceDNSNames(ctx, replicaService)...) dnsFQDN := dnsNames[0] if err == nil { diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index 62fdc64c3..285071d02 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -86,6 +86,10 @@ func TestReconcileCerts(t *testing.T) { primaryService.Namespace = namespace primaryService.Name = "the-primary" + replicaService := new(corev1.Service) + replicaService.Namespace = namespace + replicaService.Name = "the-replicas" + t.Run("check root certificate reconciliation", func(t *testing.T) { initialRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -295,14 +299,14 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) t.Run("check standard secret projection", func(t *testing.T) { - secretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster1, primaryService) + secretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster1, primaryService, replicaService) assert.NilError(t, err) assert.DeepEqual(t, testSecretProjection, secretCertProj) }) t.Run("check custom secret projection", func(t *testing.T) { - customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService) + customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService, replicaService) assert.NilError(t, err) assert.DeepEqual(t, customSecretProjection, customSecretCertProj) @@ -319,7 +323,7 @@ func TestReconcileCerts(t *testing.T) { testSecretProjection := clusterCertSecretProjection(testSecret) // reconcile the secret project using the normal 
process - customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService) + customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService, replicaService) assert.NilError(t, err) // results should be the same @@ -349,7 +353,7 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) // pass in the new root, which should result in a new cluster cert - _, err = r.reconcileClusterCertificate(ctx, returnedRoot, cluster1, primaryService) + _, err = r.reconcileClusterCertificate(ctx, returnedRoot, cluster1, primaryService, replicaService) assert.NilError(t, err) // get the new cluster cert secret @@ -371,11 +375,16 @@ func TestReconcileCerts(t *testing.T) { "got %q", leaf.Certificate.CommonName()) if dnsNames := leaf.Certificate.DNSNames(); assert.Check(t, len(dnsNames) > 1) { - assert.DeepEqual(t, dnsNames[1:], []string{ + assert.DeepEqual(t, dnsNames[1:4], []string{ "the-primary." + namespace + ".svc", "the-primary." + namespace, "the-primary", }) + assert.DeepEqual(t, dnsNames[5:8], []string{ + "the-replicas." + namespace + ".svc", + "the-replicas." + namespace, + "the-replicas", + }) } }) }) diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 0b00336c2..8f1125592 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -60,7 +60,9 @@ func TestGeneratePostgresUserSecret(t *testing.T) { if assert.Check(t, secret != nil) { assert.Equal(t, secret.Namespace, cluster.Namespace) - assert.Assert(t, metav1.IsControlledBy(secret, cluster)) + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete secrets if custom resource is deleted. 
+ //assert.Assert(t, metav1.IsControlledBy(secret, cluster)) assert.DeepEqual(t, secret.Labels, map[string]string{ "postgres-operator.crunchydata.com/cluster": "hippo2", "postgres-operator.crunchydata.com/role": "pguser", @@ -269,7 +271,9 @@ func TestReconcilePostgresVolumes(t *testing.T) { pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil) assert.NilError(t, err) - assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete PVCs if custom resource is deleted. + //assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) @@ -309,7 +313,9 @@ volumeMode: Filesystem pvc, err := reconciler.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, nil) assert.NilError(t, err) - assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete PVCs if custom resource is deleted. + //assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) @@ -407,18 +413,20 @@ volumeMode: Filesystem assert.NilError(t, err) assert.Assert(t, returned == nil) - key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} - if err := tClient.Get(ctx, key, fetched); err == nil { - assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") - } else { - assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) - } - - // Pods will redeploy while the PVC is scheduled for deletion. 
- observed.Pods = nil - returned, err = reconciler.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, nil) - assert.NilError(t, err) - assert.Assert(t, returned == nil) + // K8SPG-328: Keep this commented in case of conflicts. + // We don't want to delete PVCs if custom resource is deleted. + //key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} + //if err := tClient.Get(ctx, key, fetched); err == nil { + // assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + //} else { + // assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) + //} + // + //// Pods will redeploy while the PVC is scheduled for deletion. + //observed.Pods = nil + //returned, err = reconciler.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, nil) + //assert.NilError(t, err) + //assert.Assert(t, returned == nil) }) }) }) diff --git a/internal/naming/names.go b/internal/naming/names.go index a7eae5a4a..5b5da1d3c 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -364,6 +364,16 @@ func ExporterWebConfigMap(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ExporterQueriesConfigMap returns ObjectMeta necessary to lookup and create the +// exporter queries configmap. This configmap is used to pass the default queries +// to the exporter. +func ExporterQueriesConfigMap(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-exporter-queries-config", + } +} + // OperatorConfigurationSecret returns the ObjectMeta necessary to lookup the // Secret containing PGO configuration. 
func OperatorConfigurationSecret() metav1.ObjectMeta { diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 6f9f73294..bd69927b0 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -1019,6 +1019,7 @@ func TestProbeTiming(t *testing.T) { FailureThreshold: 1, }}, } { + tt := tt actual := probeTiming(&v1beta1.PatroniSpec{ LeaderLeaseDurationSeconds: &tt.lease, SyncPeriodSeconds: &tt.sync, diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index d71127438..0003c230e 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -177,7 +177,7 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, // - Renames the data directory as needed to bootstrap the cluster using the restored database. // This ensures compatibility with the "existing" bootstrap method that is included in the // Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { +func RestoreCommand(pgdata, hugePagesSetting string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { // After pgBackRest restores files, PostgreSQL starts in recovery to finish // replaying WAL files. 
"hot_standby" is "on" (by default) so we can detect @@ -236,6 +236,7 @@ max_locks_per_transaction = '${max_lock}' max_prepared_transactions = '${max_ptxn}' max_worker_processes = '${max_work}' unix_socket_directories = '/tmp' +huge_pages = ` + hugePagesSetting + ` EOF if [ "$(< "${pgdata}/PG_VERSION")" -ge 12 ]; then read -r max_wals <<< "${control##*max_wal_senders setting:}" diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 4a4fabb83..3bbed046e 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -297,7 +297,7 @@ func TestRestoreCommand(t *testing.T) { opts := []string{ "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, "--repo=1"} - command := RestoreCommand(pgdata, nil, strings.Join(opts, " ")) + command := RestoreCommand(pgdata, "try", nil, strings.Join(opts, " ")) assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) @@ -312,7 +312,7 @@ func TestRestoreCommand(t *testing.T) { } func TestRestoreCommandPrettyYAML(t *testing.T) { - b, err := yaml.Marshal(RestoreCommand("/dir", nil, "--options")) + b, err := yaml.Marshal(RestoreCommand("/dir", "try", nil, "--options")) assert.NilError(t, err) assert.Assert(t, strings.Contains(string(b), "\n- |"), "expected literal block scalar, got:\n%s", b) diff --git a/internal/pgmonitor/util.go b/internal/pgmonitor/util.go index 863795a8d..44ef3038a 100644 --- a/internal/pgmonitor/util.go +++ b/internal/pgmonitor/util.go @@ -16,9 +16,26 @@ package pgmonitor import ( + "context" + "os" + + "github.com/percona/percona-postgresql-operator/internal/logging" "github.com/percona/percona-postgresql-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func GetQueriesConfigDir(ctx context.Context) string { + log := logging.FromContext(ctx) + // The QUERIES_CONFIG_DIR environment variable can be used to tell postgres-operator where to + // find the setup.sql and queries.yml files when running the 
postgres-operator binary locally + if queriesConfigDir := os.Getenv("QUERIES_CONFIG_DIR"); queriesConfigDir != "" { + log.Info("Directory for setup.sql and queries files set by QUERIES_CONFIG_DIR env var. " + + "This should only be used when running the postgres-operator binary locally.") + return queriesConfigDir + } + + return "/opt/crunchy/conf" +} + // ExporterEnabled returns true if the monitoring exporter is enabled func ExporterEnabled(cluster *v1beta1.PostgresCluster) bool { if cluster.Spec.Monitoring == nil { diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index c1d4d7c73..91e12a197 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -28,7 +28,7 @@ import ( // it sets the PostgreSQL parameter "huge_pages" to "try". If it doesn't find // one, it sets "huge_pages" to "off". func SetHugePages(cluster *v1beta1.PostgresCluster, pgParameters *Parameters) { - if hugePagesRequested(cluster) { + if HugePagesRequested(cluster) { pgParameters.Default.Add("huge_pages", "try") } else { pgParameters.Default.Add("huge_pages", "off") @@ -37,7 +37,7 @@ func SetHugePages(cluster *v1beta1.PostgresCluster, pgParameters *Parameters) { // This helper function checks to see if a huge_pages value greater than zero has // been set in any of the PostgresCluster's instances' resource specs -func hugePagesRequested(cluster *v1beta1.PostgresCluster) bool { +func HugePagesRequested(cluster *v1beta1.PostgresCluster) bool { for _, instance := range cluster.Spec.InstanceSets { for resourceName := range instance.Resources.Limits { if strings.HasPrefix(resourceName.String(), corev1.ResourceHugePagesPrefix) { diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go index 9cbb76e92..4d2caa7b2 100644 --- a/internal/upgradecheck/helpers_test.go +++ b/internal/upgradecheck/helpers_test.go @@ -43,12 +43,12 @@ type fakeClientWithError struct { errorType string } -func (f *fakeClientWithError) Get(ctx 
context.Context, key types.NamespacedName, obj crclient.Object) error { +func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error { switch f.errorType { case "get error": return fmt.Errorf("get error") default: - return f.Client.Get(ctx, key, obj) + return f.Client.Get(ctx, key, obj, opts...) } } diff --git a/internal/util/features.go b/internal/util/features.go index 754e93bd6..02ae40d17 100644 --- a/internal/util/features.go +++ b/internal/util/features.go @@ -53,7 +53,7 @@ const ( // - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L729-732 var pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, - InstanceSidecars: {Default: true, PreRelease: featuregate.Alpha}, + InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, } diff --git a/percona/controller/pgcluster/controller_test.go b/percona/controller/pgcluster/controller_test.go index e15a9f233..eaa3c7709 100644 --- a/percona/controller/pgcluster/controller_test.go +++ b/percona/controller/pgcluster/controller_test.go @@ -1,3 +1,6 @@ +//go:build envtest +// +build envtest + package pgcluster import ( diff --git a/percona/controller/pgcluster/finalizer_test.go b/percona/controller/pgcluster/finalizer_test.go index 2a377cdaf..31299ccb4 100644 --- a/percona/controller/pgcluster/finalizer_test.go +++ b/percona/controller/pgcluster/finalizer_test.go @@ -1,3 +1,6 @@ +//go:build envtest +// +build envtest + package pgcluster import ( diff --git a/percona/controller/pgcluster/status_test.go b/percona/controller/pgcluster/status_test.go index a0d72ae4c..5555b9269 100644 --- a/percona/controller/pgcluster/status_test.go +++ b/percona/controller/pgcluster/status_test.go @@ -1,3 +1,6 @@ +//go:build envtest 
+// +build envtest + package pgcluster import ( diff --git a/percona/controller/pgcluster/suite_test.go b/percona/controller/pgcluster/suite_test.go index d29b7f0fa..e0aad9e36 100644 --- a/percona/controller/pgcluster/suite_test.go +++ b/percona/controller/pgcluster/suite_test.go @@ -1,3 +1,6 @@ +//go:build envtest +// +build envtest + package pgcluster import ( diff --git a/percona/controller/pgcluster/version_test.go b/percona/controller/pgcluster/version_test.go index 63ee8a886..1bf5e9366 100644 --- a/percona/controller/pgcluster/version_test.go +++ b/percona/controller/pgcluster/version_test.go @@ -1,3 +1,6 @@ +//go:build envtest +// +build envtest + package pgcluster import ( diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go new file mode 100644 index 000000000..9e49d281c --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1beta1 + +import corev1 "k8s.io/api/core/v1" + +// PGMonitorSpec defines the desired state of the pgMonitor tool suite +type PGMonitorSpec struct { + // +optional + Exporter *ExporterSpec `json:"exporter,omitempty"` +} + +type ExporterSpec struct { + + // Projected volumes containing custom PostgreSQL Exporter configuration. 
Currently supports + // the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in + // any volume projected using this field, it will be loaded using the "extend.query-path" flag: + // https://github.com/prometheus-community/postgres_exporter#flags + // Changing the values of field causes PostgreSQL and the exporter to restart. + // +optional + Configuration []corev1.VolumeProjection `json:"configuration,omitempty"` + + // Projected secret containing custom TLS certificates to encrypt output from the exporter + // web server + // +optional + CustomTLSSecret *corev1.SecretProjection `json:"customTLSSecret,omitempty"` + + // The image name to use for crunchy-postgres-exporter containers. The image may + // also be set using the RELATED_IMAGE_PGEXPORTER environment variable. + // +optional + Image string `json:"image,omitempty"` + + // Changing this value causes PostgreSQL and the exporter to restart. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 949e1b9d6..f3b0bd375 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -671,38 +671,6 @@ type MonitoringStatus struct { ExporterConfiguration string `json:"exporterConfiguration,omitempty"` } -// PGMonitorSpec defines the desired state of the pgMonitor tool suite -type PGMonitorSpec struct { - // +optional - Exporter *ExporterSpec `json:"exporter,omitempty"` -} - -type ExporterSpec struct { - - // Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports - // the customization of PostgreSQL Exporter queries. 
If a "queries.yml" file is detected in - // any volume projected using this field, it will be loaded using the "extend.query-path" flag: - // https://github.com/prometheus-community/postgres_exporter#flags - // Changing the values of field causes PostgreSQL and the exporter to restart. - // +optional - Configuration []corev1.VolumeProjection `json:"configuration,omitempty"` - - // Projected secret containing custom TLS certificates to encrypt output from the exporter - // web server - // +optional - CustomTLSSecret *corev1.SecretProjection `json:"customTLSSecret,omitempty"` - - // The image name to use for crunchy-postgres-exporter containers. The image may - // also be set using the RELATED_IMAGE_PGEXPORTER environment variable. - // +optional - Image string `json:"image,omitempty"` - - // Changing this value causes PostgreSQL and the exporter to restart. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` -} - func NewPostgresCluster() *PostgresCluster { cluster := &PostgresCluster{} cluster.SetGroupVersionKind(GroupVersion.WithKind("PostgresCluster")) diff --git a/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml b/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml index 4271c061a..0e53eab2d 100644 --- a/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml +++ b/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml @@ -27,4 +27,4 @@ spec: monitoring: pgmonitor: exporter: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.2.0-0 + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.3.1-0 diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml new file mode 100644 index 000000000..fa3985231 --- /dev/null +++ 
b/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -0,0 +1,11 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # postgres version that is no longer available + fromPostgresVersion: 10 + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: major-upgrade-empty-image diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml new file mode 100644 index 000000000..b7d0f936f --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml new file mode 100644 index 000000000..c85a9b8da --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml @@ -0,0 +1,23 @@ +--- +# Create the cluster we will do an actual upgrade on, but set the postgres version +# to '10' to force a missing image scenario +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + # postgres version that is no longer available + postgresVersion: 10 + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: 
[ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml new file mode 100644 index 000000000..72e9ff638 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml @@ -0,0 +1,12 @@ +--- +# The cluster is not running due to the missing image, not due to a proper +# shutdown status. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml new file mode 100644 index 000000000..316f3a547 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml new file mode 100644 index 000000000..5bd9d447c --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml new file mode 100644 index 000000000..fcdf4f62e --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml @@ -0,0 +1,17 @@ +--- +# Update the postgres version and restart the cluster. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: false + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # update postgres version + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml new file mode 100644 index 000000000..14c33cccf --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml new file mode 100644 index 000000000..316f3a547 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml new file mode 100644 index 000000000..78e51e566 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml new file mode 100644 index 000000000..2fa2c949a --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image + annotations: + postgres-operator.crunchydata.com/allow-upgrade: empty-image-upgrade diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml new file mode 100644 index 000000000..bd828180f --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciliation is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml new file mode 100644 index 000000000..e5f270fb2 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml new file mode 100644 index 000000000..dfcbd4c81 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml new file mode 100644 index 000000000..969e7f0ac --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# Check that the pgbackrest setup has successfully completed +- script: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-empty-image-repo-host" -c pgbackrest -- pgbackrest check --stanza=db diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml new file mode 100644 index 000000000..5315c1d14 --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml @@ -0,0 +1,39 @@ +--- +# Check the version reported by PostgreSQL +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-empty-image-pguser-major-upgrade-empty-image, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml new file mode 100644 index 000000000..56289c35c --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md b/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md new file mode 100644 index 000000000..341cc854f --- /dev/null +++ b/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md @@ -0,0 +1,36 @@ +## Major upgrade missing image tests + +This is a variation derived from our major upgrade KUTTL tests designed to +test scenarios where required container images are not defined in either the +PostgresCluster spec or via the RELATED_IMAGES environment variables. + +### Basic PGUpgrade controller and CRD instance validation + +* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-assert: check that the PGUpgrade instance exists and has the expected status + +### Verify new statuses for missing required container images + +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) +* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" +* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade +* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" + +### Update to an available Postgres version, start and upgrade PostgresCluster + +* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false +* 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" +* 13--shutdown-cluster: set spec.shutdown to 'true' +* 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" +* 14--annotate-cluster: set the required annotation +* 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status +* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' + +### Verify upgraded PostgresCluster + +* 15-assert: verify that the cluster is running +* 16-check-pgbackrest: check that the pgbackrest setup has successfully completed +* 17--check-version: check the version reported by PostgreSQL +* 17-assert: assert the Job from the previous step succeeded + + diff --git a/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml b/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml index 11de60216..814958a9f 100644 --- a/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml +++ b/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml @@ -122,8 +122,8 @@ spec: DECLARE result text; BEGIN - SELECT ST_AsText(ST_AsGeoJSON('SRID=4326;POINT(-118.4079 33.9434)'::geography)) INTO result; - ASSERT result = 'POINT(-118.4079 33.9434)', 'GeoJSON check failed'; + SELECT ST_AsGeoJSON('SRID=4326;POINT(-118.4079 33.9434)'::geography) INTO result; + ASSERT result = 
'{\"type\":\"Point\",\"coordinates\":[-118.4079,33.9434]}', FORMAT('GeoJSON check failed, got %L', result); END \$\$;" 2>&1) if [[ "$RESULT" == *"ERROR"* ]]; then diff --git a/testing/kuttl/e2e/delete-namespace/00--namespace.yaml b/testing/kuttl/e2e/delete-namespace/00--namespace.yaml index 5ff7dde18..617c1e539 100644 --- a/testing/kuttl/e2e/delete-namespace/00--namespace.yaml +++ b/testing/kuttl/e2e/delete-namespace/00--namespace.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: Namespace metadata: - name: kuttl-test-delete-namespace + name: ${KUTTL_TEST_DELETE_NAMESPACE} diff --git a/testing/kuttl/e2e/delete-namespace/01--cluster.yaml b/testing/kuttl/e2e/delete-namespace/01--cluster.yaml index bed1df0d1..fe6392d75 100644 --- a/testing/kuttl/e2e/delete-namespace/01--cluster.yaml +++ b/testing/kuttl/e2e/delete-namespace/01--cluster.yaml @@ -3,26 +3,16 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-namespace - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} spec: postgresVersion: ${KUTTL_PG_VERSION} instances: - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } backups: pgbackrest: repos: - name: repo1 volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/delete-namespace/01-assert.yaml b/testing/kuttl/e2e/delete-namespace/01-assert.yaml index d117e8673..3d2c7ec93 100644 --- a/testing/kuttl/e2e/delete-namespace/01-assert.yaml +++ b/testing/kuttl/e2e/delete-namespace/01-assert.yaml @@ -3,18 +3,18 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-namespace - namespace: 
kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} status: instances: - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 --- apiVersion: batch/v1 kind: Job metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace postgres-operator.crunchydata.com/pgbackrest-backup: replica-create diff --git a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml b/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml index b7caacc02..8987d233f 100644 --- a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml +++ b/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml @@ -5,4 +5,4 @@ kind: TestStep delete: - apiVersion: v1 kind: Namespace - name: kuttl-test-delete-namespace + name: ${KUTTL_TEST_DELETE_NAMESPACE} diff --git a/testing/kuttl/e2e/delete-namespace/02-errors.yaml b/testing/kuttl/e2e/delete-namespace/02-errors.yaml index 200dbed12..ee6f31178 100644 --- a/testing/kuttl/e2e/delete-namespace/02-errors.yaml +++ b/testing/kuttl/e2e/delete-namespace/02-errors.yaml @@ -2,13 +2,13 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} name: delete-namespace --- apiVersion: apps/v1 kind: StatefulSet metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace --- @@ -16,34 +16,34 @@ metadata: apiVersion: v1 kind: Endpoints metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace --- apiVersion: v1 kind: Pod metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: 
postgres-operator.crunchydata.com/cluster: delete-namespace --- apiVersion: v1 kind: Service metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace --- apiVersion: v1 kind: Secret metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace --- apiVersion: v1 kind: ConfigMap metadata: - namespace: kuttl-test-delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} labels: postgres-operator.crunchydata.com/cluster: delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/README.md b/testing/kuttl/e2e/delete-namespace/README.md index d1256a7cd..697e2ae91 100644 --- a/testing/kuttl/e2e/delete-namespace/README.md +++ b/testing/kuttl/e2e/delete-namespace/README.md @@ -7,4 +7,5 @@ Note: KUTTL provides a `$NAMESPACE` var that can be used in scripts/commands, but which cannot be used in object definition yamls (like `01--cluster.yaml`). -Therefore, we use a given, non-random namespace: `kuttl-test-delete-namespace`. +Therefore, we use a given, non-random namespace that is defined in the makefile +and generated with `generate-kuttl`. 
diff --git a/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml b/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml new file mode 100644 index 000000000..ff3a5f356 --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml @@ -0,0 +1,11 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # postgres version that is no longer available + fromPostgresVersion: 10 + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: missing-primary-status diff --git a/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml new file mode 100644 index 000000000..b7d0f936f --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml b/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml new file mode 100644 index 000000000..f205e2bcd --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml @@ -0,0 +1,23 @@ +--- +# Create the cluster we will do an actual upgrade on, but set the postgres version +# to '10' to force a missing image scenario +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: missing-primary-status +spec: + # postgres version that is no longer available + postgresVersion: 10 + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + 
backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml new file mode 100644 index 000000000..72e9ff638 --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml @@ -0,0 +1,12 @@ +--- +# The cluster is not running due to the missing image, not due to a proper +# shutdown status. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml b/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml new file mode 100644 index 000000000..6d784b682 --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: missing-primary-status +spec: + shutdown: true diff --git a/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml new file mode 100644 index 000000000..5bd9d447c --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/empty-image-upgrade/README.md b/testing/kuttl/e2e/empty-image-upgrade/README.md new file mode 100644 index 000000000..5547515d1 --- /dev/null +++ b/testing/kuttl/e2e/empty-image-upgrade/README.md @@ -0,0 +1,17 @@ +## Empty image upgrade status tests + +This is a variation derived from our major upgrade KUTTL tests designed to +test a scenario where a required container images is not defined in either the +PostgresCluster spec or via the RELATED_IMAGES environment variables. + +### Basic PGUpgrade controller and CRD instance validation + +* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-assert: check that the PGUpgrade instance exists and has the expected status + +### Verify new statuses for missing required container images + +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) +* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" +* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade +* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/exporter/00-assert.yaml b/testing/kuttl/e2e/exporter/00-assert.yaml index 55c2422cc..aa4d90835 100644 --- a/testing/kuttl/e2e/exporter/00-assert.yaml +++ b/testing/kuttl/e2e/exporter/00-assert.yaml @@ -9,3 +9,8 @@ status: readyReplicas: 1 replicas: 1 updatedReplicas: 1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-exporter-queries-config diff --git a/testing/kuttl/e2e/exporter/10-assert.yaml b/testing/kuttl/e2e/exporter/10-assert.yaml index 6517873c9..70c628e91 100644 --- a/testing/kuttl/e2e/exporter/10-assert.yaml +++ b/testing/kuttl/e2e/exporter/10-assert.yaml @@ -12,5 +12,10 @@ status: --- apiVersion: v1 kind: ConfigMap +metadata: + name: exporter-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap metadata: name: exporter-tls-exporter-web-config diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml index 5da5be0cc..c83bfea9d 100644 --- a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml @@ -18,3 +18,8 @@ spec: proxy: pgBouncer: replicas: 1 + config: + # Set the pgBouncer verbosity level to debug to print connection logs + # --https://www.pgbouncer.org/config.html#log-settings + global: + verbose: '1' diff --git a/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml b/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml index be5ca2a3c..67e8f31c8 100644 --- a/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml +++ b/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml @@ -10,9 +10,22 @@ commands: --patch '{"data":{"pgbouncer-frontend.crt":""}}' # Wait for the certificate to be regenerated then loaded. 
- until + # Changing this from "wait until timeout" to "try X times" + # so that we can get the logs before exiting 1 in case we cannot find the reload. + for _ in $(seq 120); do kubectl logs --namespace "${NAMESPACE}" deployment.apps/proxied-pgbouncer \ - --container pgbouncer-config --since-time "${BEFORE}" | grep 'Loaded' - do + --container pgbouncer-config --since-time "${BEFORE}" | grep 'Loaded' && \ + found=true && break sleep 1 done + + # This test has been flaky in the past, potentially around rotating/reloading the cert. + # To help debug, we set the pgBouncer verbosity to 1 (debug) and print the logs + kubectl logs --namespace "${NAMESPACE}" deployment.apps/proxied-pgbouncer \ + --all-containers --prefix --timestamps + + # If we haven't found the `Loaded` log statement, exit with an error + if [ -z "$found" ]; then + echo "pgbouncer-config has failed to reload in time" + exit 1; + fi diff --git a/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml index 91e55ed94..ea8353427 100644 --- a/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml @@ -5,15 +5,13 @@ commands: # Get a list of the current owners of the root ca cert secret and verify that # both owners are listed. 
- script: | - for i in {1..5}; do + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do sleep 1 # this sleep allows time for the owner reference list to be updated CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') # If owner1 and owner2 are both listed, exit successfully - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } if contains "${CURRENT_OWNERS}" "owner1" && contains "${CURRENT_OWNERS}" "owner2"; then exit 0 fi done - # proper ownership references were not found, so the test fails - exit 1 diff --git a/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml index 3d06d39a2..951f9fce6 100644 --- a/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml @@ -5,15 +5,13 @@ commands: # Get a list of the current owners of the root ca cert secret and verify that # owner1 is no longer listed and owner2 is found. - script: | - for i in {1..5}; do + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do sleep 1 # this sleep allows time for the owner reference list to be updated CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') # If owner1 is removed and owner2 is still listed, exit successfully - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - if ! 
contains "${CURRENT_OWNERS}" "owner1" && contains "${CURRENT_OWNERS}" "owner2"; then + if !(contains "${CURRENT_OWNERS}" "owner1") && contains "${CURRENT_OWNERS}" "owner2"; then exit 0 fi done - # proper ownership references were not found, so the test fails - exit 1 diff --git a/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml b/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml index 339192e97..9c432f02b 100644 --- a/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml @@ -8,9 +8,12 @@ commands: # secret should be deleted. - script: | NUM_CLUSTERS=$(kubectl --namespace="${NAMESPACE}" get postgrescluster --output name | wc -l) + echo "Found ${NUM_CLUSTERS} clusters" if [ "$NUM_CLUSTERS" != 0 ]; then - for i in {1..5}; do - sleep 1 # This sleep allows time for the owner reference list to be updated + # Continue checking until Kuttl times out + # If at least one owner is never removed the test fails + while true; do + sleep 5 # This sleep allows time for the owner reference list to be updated CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') # If neither owner is listed, exit successfully @@ -19,17 +22,15 @@ commands: exit 0 fi done - # At least one owner was never removed, so the test fails - exit 1 else - for i in {1..5}; do - sleep 1 # this sleep allows time for garbage collector to delete the secret + # Continue checking until Kuttl times out + # If the secret is never removed, the test fails + while true; do + sleep 5 # this sleep allows time for garbage collector to delete the secret ROOT_SECRET=$(kubectl --namespace="${NAMESPACE}" get --ignore-not-found \ secret pgo-root-cacert --output name | wc -l) if [ "$ROOT_SECRET" = 0 ]; then exit 0 fi done - # The root secret was never removed, so the test fails - exit 1 fi diff --git a/testing/kuttl/e2e/streaming-standby/00--secrets.yaml 
b/testing/kuttl/e2e/streaming-standby/00--secrets.yaml index 2b3c78113..1f8dd06cc 100644 --- a/testing/kuttl/e2e/streaming-standby/00--secrets.yaml +++ b/testing/kuttl/e2e/streaming-standby/00--secrets.yaml @@ -1,8 +1,8 @@ apiVersion: v1 data: - ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNpZ0F3SUJBZ0lSQU0vVDF1MXllNHZ3ek1SWEt1NGlWZVF3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nakl3TlRJek1UZ3hNRFE0V2hjTgpNekl3TlRJd01Ua3hNRFE0V2pBZk1SMHdHd1lEVlFRREV4UndiM04wWjNKbGN5MXZjR1Z5WVhSdmNpMWpZVEJaCk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQlArM2dEb2s3V3duaDZmNnNUV3ozUmlrS3Q4TFhyN0QKSEpGSGNXdHd3MDI5TXQrb0lubWYwUE1VS1BiVHgrSDBSZTBLcENSRUhCbytmcXJqblIzZlBXdWpSVEJETUE0RwpBMVVkRHdFQi93UUVBd0lCQmpBU0JnTlZIUk1CQWY4RUNEQUdBUUgvQWdFQU1CMEdBMVVkRGdRV0JCU3ErUFdhClQreTAvUjBRb1AzUS9nUnZWY3JGQ0RBS0JnZ3Foa2pPUFFRREF3TklBREJGQWlBRHl3UXR2Zk1xUEIvWXlzL1QKd2lNZExNR3JocWVXeDVjYVZ2TWNVWkJxWHdJaEFQS1NBemo5K1RsTzg0cmNFN25pT3U2K2NRWEYzcjNxTFFOYQpNYWVId3d5TAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNNVENDQWRlZ0F3SUJBZ0lRUjVzZEwwcit1S0VTUjVudzFvYkFuakFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNakExTWpNeE9ERXdORGhhRncweQpNekExTWpNeE9URXdORGhhTURzeE9UQTNCZ05WQkFNVE1IQnliMlIxWTNScGIyNHRjSEpwYldGeWVTNXdjbTlrCmRXTjBhVzl1TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzTGpCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUgKQTBJQUJPTWQ3ZUo5U1JtYmd0TUJ1R1hFc1UyNzBOUjhvU0pCSUJEdk1DTGpkSnB0TmVHWCt4MEhHT0ZZRGw0cgpOd0JZUk9EaUc3Z2loaVZ0ZyszTTNhejdjWk9qZ2Rnd2dkVXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01Bd0dBMVVkCkV3RUIvd1FDTUFBd0h3WURWUjBqQkJnd0ZvQVVxdmoxbWsvc3RQMGRFS0Q5MFA0RWIxWEt4UWd3Z1pNR0ExVWQKRVFTQml6Q0JpSUl3Y0hKdlpIVmpkR2x2Ymkxd2NtbHRZWEo1TG5CeWIyUjFZM1JwYjI0dWMzWmpMbU5zZFhOMApaWEl1Ykc5allXd3VnaUZ3Y205a2RXTjBhVzl1TFhCeWFXMWhjbmt1Y0hKdlpIVmpkR2x2Ymk1emRtT0NIWEJ5CmIyUjFZM1JwYjI0dGNISnBiV0Z5ZVM1d2NtOWtkV04wYVc5dWdoSndjbTlrZFdOMGFXOXVMWEJ5YVcxaGNua3cKQ2dZSUtvWkl6ajBFQXdNRFNBQXdSUUloQVBCai9uVU9HeHpsNXVvQnl6WHNuT3ppbTBJNHFVL21pRS9COFpOcApBSTNNQWlBOWtFeUphUGRqVHZ6cEc4LzZxbldrdGh6K1FKK3h5bGtMenNVUUNld2ZhUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUlFRlpFS1ZIZ3YyT25uZkljaXlRUFlzbzBvalBVN3NIVS9KUFI3TE5CWERvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNHgzdDRuMUpHWnVDMHdHNFpjU3hUYnZRMUh5aElrRWdFTzh3SXVOMG1tMDE0WmY3SFFjWQo0VmdPWGlzM0FGaEU0T0lidUNLR0pXMkQ3Y3pkclB0eGt3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNQakNDQWVXZ0F3SUJBZ0lSQU93NURHaGVVZnVNY25KYVdKNkllall3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nak13TkRFeE1UYzBOVE01V2hjTgpNek13TkRBNE1UZzBOVE01V2pBOU1Uc3dPUVlEVlFRREV6SndjbWx0WVhKNUxXTnNkWE4wWlhJdGNISnBiV0Z5CmVTNWtaV1poZFd4MExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc0xqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDkKQXdFSEEwSUFCT3RlNytQWFlDci9RQVJkcHlwYTFHcEpkbW5wOFN3ZG9FOTIzUXoraWt4UllTalgwUHBXcytqUQpVNXlKZ0NDdGxyZmxFZVZ4S2YzaVpiVHdadFlIaHVxamdlTXdnZUF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQXdHCkExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVVkp0R0R0Yk1kMVlmemUrRXBLdGJDMTdINXFZd2daNEcKQTFVZEVRU0JsakNCazRJeWNISnBiV0Z5ZVMxamJIVnpkR1Z5TFhCeWFXMWhjbmt1WkdWbVlYVnNkQzV6ZG1NdQpZMngxYzNSbGNpNXNiMk5oYkM2Q0kzQnlhVzFoY25rdFkyeDFjM1JsY2kxd2NtbHRZWEo1TG1SbFptRjFiSFF1CmMzWmpnaDl3Y21sdFlYSjVMV05zZFhOMFpYSXRjSEpwYldGeWVTNWtaV1poZFd4MGdoZHdjbWx0WVhKNUxXTnMKZFhOMFpYSXRjSEpwYldGeWVUQUtCZ2dxaGtqT1BRUURBd05IQURCRUFpQjA3Q3YzRHJTNXUxRFdaek1MQjdvbAppcjFFWEpQTnFaOXZWQUF5ZTdDMGJRSWdWQVlDM2F0ekl4a0syNHlQUU1TSjU1OGFaN3JEdkZGZXdOaVpmdSt0CjdETT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUoxYkNXMTByR3o2VWQ1K2R3WmZWcGNUNFlqck9XVG1iVW9XNXRxYTA2b1ZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNjE3djQ5ZGdLdjlBQkYybktsclVha2wyYWVueExCMmdUM2JkRFA2S1RGRmhLTmZRK2xhego2TkJUbkltQUlLMld0K1VSNVhFcC9lSmx0UEJtMWdlRzZnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: name: cluster-cert @@ -10,9 +10,9 @@ type: Opaque --- apiVersion: v1 data: - ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNpZ0F3SUJBZ0lSQU0vVDF1MXllNHZ3ek1SWEt1NGlWZVF3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nakl3TlRJek1UZ3hNRFE0V2hjTgpNekl3TlRJd01Ua3hNRFE0V2pBZk1SMHdHd1lEVlFRREV4UndiM04wWjNKbGN5MXZjR1Z5WVhSdmNpMWpZVEJaCk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQlArM2dEb2s3V3duaDZmNnNUV3ozUmlrS3Q4TFhyN0QKSEpGSGNXdHd3MDI5TXQrb0lubWYwUE1VS1BiVHgrSDBSZTBLcENSRUhCbytmcXJqblIzZlBXdWpSVEJETUE0RwpBMVVkRHdFQi93UUVBd0lCQmpBU0JnTlZIUk1CQWY4RUNEQUdBUUgvQWdFQU1CMEdBMVVkRGdRV0JCU3ErUFdhClQreTAvUjBRb1AzUS9nUnZWY3JGQ0RBS0JnZ3Foa2pPUFFRREF3TklBREJGQWlBRHl3UXR2Zk1xUEIvWXlzL1QKd2lNZExNR3JocWVXeDVjYVZ2TWNVWkJxWHdJaEFQS1NBemo5K1RsTzg0cmNFN25pT3U2K2NRWEYzcjNxTFFOYQpNYWVId3d5TAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqakNDQVRXZ0F3SUJBZ0lSQU9waEYwSm16R3p1dWNJS2tLVHZGdzh3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nakl3TlRJek1UZ3hNRFE0V2hjTgpNak13TlRJek1Ua3hNRFE0V2pBWE1SVXdFd1lEVlFRRERBeGZZM0oxYm1Ob2VYSmxjR3d3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUSldWWnphdk1xbU5NYTNwLzhBMXZta2hLZzNpRHU2TUhzc0dCS094bVYKYWdXS0RwRnlxTStYZ3F1bjdxWlUvd2NkRUZ5VFVLVCthUjVRSGozdFZYZlFvMW93V0RBT0JnTlZIUThCQWY4RQpCQU1DQmFBd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTcStQV2FUK3kwL1IwUW9QM1EvZ1J2ClZjckZDREFYQmdOVkhSRUVFREFPZ2d4ZlkzSjFibU5vZVhKbGNHd3dDZ1lJS29aSXpqMEVBd01EUndBd1JBSWcKSE5Hc0NJdEdtcVBLSEY4M2EyazBoVitVSGNDU0VmbExraStsa2RiVnovVUNJSHV0d2VWU0pITk5ieldsd3EyawpxSDhFT1JIOWMvTHJJT2htK1B3UmFqT0kKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= - tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUMyb2ZZNnBROGhlblZJd1RnTjNQYS9jLzRBeGk0NGFMdm1pWiszblhFbGJvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFeVZsV2MycnpLcGpUR3Q2Zi9BTmI1cElTb040Zzd1akI3TEJnU2pzWmxXb0ZpZzZSY3FqUApsNEtycCs2bVZQOEhIUkJjazFDay9ta2VVQjQ5N1ZWMzBBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqekNDQVRTZ0F3SUJBZ0lRRzA0MEprWjYwZkZtanpaVG1SekhyakFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUJjeEZUQVRCZ05WQkFNTURGOWpjblZ1WTJoNWNtVndiREJaTUJNR0J5cUdTTTQ5CkFnRUdDQ3FHU000OUF3RUhBMElBQk5HVHcvSmVtaGxGK28xUlRBb0VXSndzdjJ6WjIyc1p4N2NjT2VmL1NXdjYKeXphYkpaUmkvREFyK0kwUHNyTlhmand3a0xMa3hERGZsTklvcFZMNVYwT2pXakJZTUE0R0ExVWREd0VCL3dRRQpBd0lGb0RBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkZTYlJnN1d6SGRXSDgzdmhLU3JXd3RlCngrYW1NQmNHQTFVZEVRUVFNQTZDREY5amNuVnVZMmg1Y21Wd2JEQUtCZ2dxaGtqT1BRUURBd05KQURCR0FpRUEKcWVsYmUvdTQzRFRPWFdlell1b3Nva0dUbHg1U2ljUFRkNk05Q3pwU2VoWUNJUUNOOS91Znc0SUZzdDZOM1RtYQo4MmZpSElKSUpQY0RjM2ZKUnFna01RQmF0QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBxeTVzNVJxWThKUmdycjJreE9zaG9hc25yTWhUUkJPYjZ0alI3T2ZqTFlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMFpQRDhsNmFHVVg2alZGTUNnUlluQ3kvYk5uYmF4bkh0eHc1NS85SmEvckxOcHNsbEdMOApNQ3Y0alEreXMxZCtQRENRc3VURU1OK1UwaWlsVXZsWFF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: name: replication-cert diff --git a/testing/kuttl/kuttl-test.yaml b/testing/kuttl/kuttl-test.yaml index a14074721..673370750 100644 --- a/testing/kuttl/kuttl-test.yaml +++ b/testing/kuttl/kuttl-test.yaml @@ -2,7 +2,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestSuite testDirs: - testing/kuttl/e2e-generated/ -timeout: 180 +timeout: 300 parallel: 2 # by default kuttl will run in a generated namespace to override # that functionality simply uncomment the line below and replace diff --git a/testing/policies/kyverno/kustomization.yaml b/testing/policies/kyverno/kustomization.yaml index d5bd95420..88e9775e7 100644 --- a/testing/policies/kyverno/kustomization.yaml +++ b/testing/policies/kyverno/kustomization.yaml @@ -7,8 +7,8 @@ bases: resources: # CVE-2020-14386: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2020-012 # 
CVE-2021-22555: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2021-015 - - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require_drop_all/require_drop_all.yaml - - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require_ro_rootfs/require_ro_rootfs.yaml + - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require-drop-all/require-drop-all.yaml + - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require-ro-rootfs/require-ro-rootfs.yaml # CVE-2020-8554: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2020-015 - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/restrict-service-external-ips/restrict-service-external-ips.yaml