Mirror of https://github.com/cloudnative-pg/plugin-barman-cloud.git
(synced 2026-01-13 14:13:10 +01:00)

Compare commits: 38 commits, 40e5ff3daf...dcba57568f
Commits in range (newest first; author and date columns were empty in the scrape):

dcba57568f, 316828cc73, a1e917f749, 3d63da0b8d, 116cd910ba, 2fa06b7497,
1ef53d0341, 666bc6a580, c46ea63302, c6daf8b14c, 33f3373b60, 57e861dea1,
b073a2ab92, f23e56e532, 545b8f35bb, 2ffa11edc3, 1012cc6064, ddb31dd72a,
2e3b08d5d6, 4aa47a7a2e, b0f80519e7, 45932903b8, ffb4ffb7a6, 054c3844bf,
c2bfe1217e, 49f1096cba, 782fe772f7, 3a9697e69c, 6700c60446, da5acb59d8,
931a06a407, 8ec400aae7, d8f78f90d0, 67ecc6228e, c2671ec92c, d0b18bb5b9,
a0e6717026, 1e13446d56
.github/workflows/barman-base-image.yml (2 lines changed)

@@ -27,7 +27,7 @@ jobs:
       - name: Install Dagger
         env:
           # renovate: datasource=github-tags depName=dagger/dagger versioning=semver
-          DAGGER_VERSION: 0.19.2
+          DAGGER_VERSION: 0.19.5
         run: |
           curl -L https://dl.dagger.io/dagger/install.sh | BIN_DIR=$HOME/.local/bin sh
       - name: Publish a barman-base
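Most commits in this range are Renovate version bumps, driven by the `# renovate: ...` hint comment that sits above each pinned value, as in the hunk above. A minimal sketch of the kind of custom regex manager that consumes such comments; Renovate config normally lives in a JSON file and the exact rule in this repository may differ, so every field below is illustrative:

    # Hypothetical Renovate rule (rendered as YAML for readability).
    # It matches a "# renovate: ..." hint comment and bumps the
    # *_VERSION pin on the following line, e.g. DAGGER_VERSION.
    customManagers:
      - customType: regex
        fileMatch:
          - '^\.github/workflows/.+\.ya?ml$'
          - '^Taskfile\.yml$'
        matchStrings:
          # Named capture groups feed Renovate's datasource lookup.
          - '# renovate: datasource=(?<datasource>\S+) depName=(?<depName>\S+).*\n\s*[A-Z_]+: (?<currentValue>\S+)'

With a rule like this in place, bumping Dagger from 0.19.2 to 0.19.5 touches every file where the comment appears, which is why the same two-line hunk recurs across the workflows and the Taskfile below.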
.github/workflows/ci.yml (4 lines changed)

@@ -44,7 +44,7 @@ jobs:
       - name: Install Dagger
         env:
           # renovate: datasource=github-tags depName=dagger/dagger versioning=semver
-          DAGGER_VERSION: 0.19.2
+          DAGGER_VERSION: 0.19.5
         run: |
           curl -L https://dl.dagger.io/dagger/install.sh | BIN_DIR=$HOME/.local/bin sh
       - name: Run CI task
@@ -63,7 +63,7 @@ jobs:
         run: |
           task publish
       - name: Attach manifest to workflow run
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
        with:
          name: manifest.yaml
          path: ./manifest.yaml
.github/workflows/publish-docs.yml (2 lines changed)

@@ -28,7 +28,7 @@ jobs:
         uses: actions/checkout@v5

       - name: Setup Node
-        uses: actions/setup-node@v5
+        uses: actions/setup-node@v6
         with:
           # Use the latest LTS version of Node.js already installed on the runner.
           node-version: latest
.github/workflows/release-please.yml (8 lines changed)

@@ -21,9 +21,9 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v5
       - name: Install node
-        uses: actions/setup-node@v5
+        uses: actions/setup-node@v6
         with:
-          node-version: 22
+          node-version: 24
       - name: Install QEMU static binaries
         uses: docker/setup-qemu-action@v3
       - name: Install Task
@@ -31,7 +31,7 @@ jobs:
       - name: Install Dagger
         env:
           # renovate: datasource=github-tags depName=dagger/dagger versioning=semver
-          DAGGER_VERSION: 0.19.2
+          DAGGER_VERSION: 0.19.5
         run: |
           curl -L https://dl.dagger.io/dagger/install.sh | BIN_DIR=$HOME/.local/bin sh
       - name: Create image and manifest
@@ -42,7 +42,7 @@ jobs:
           task publish
           task manifest
       - name: Attach manifest to workflow run
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
         with:
           name: manifest.yaml
           path: ./manifest.yaml
.github/workflows/release-publish.yml (2 lines changed)

@@ -21,7 +21,7 @@ jobs:
       - name: Install Dagger
         env:
           # renovate: datasource=github-tags depName=dagger/dagger versioning=semver
-          DAGGER_VERSION: 0.19.2
+          DAGGER_VERSION: 0.19.5
         run: |
           curl -L https://dl.dagger.io/dagger/install.sh | BIN_DIR=$HOME/.local/bin sh
       - name: Create image and manifest
(release-please version manifest)

@@ -1,3 +1,3 @@
 {
-  ".": "0.7.0"
+  ".": "0.9.0"
 }
(spell-check wordlist; hunks shown in their post-change state, the added entries are unmarked in the scrape)

@@ -33,6 +33,7 @@ ObjectMeta
 ObjectStore
 ObjectStoreSpec
 ObjectStoreStatus
 Observability
 PITR
 PoR
 PostgreSQL
@@ -49,6 +50,7 @@ SPDX
 SPDX
 SSL
 ServerRecoveryWindow
 ServiceAccount
 Slonik
 TLS
 TODO
@@ -106,6 +108,7 @@ involvedObject
 io
 isWALArchiver
 jq
 json
 jsonpath
 kb
 krew
@@ -118,6 +121,7 @@ md
 minio
 namespace
 namespaces
 nonResourceURLs
 objectstore
 objectstores
 pluginConfiguration
@@ -131,7 +135,9 @@ recoverability
 repos
 retentionCheckInterval
 retentionPolicy
 roleRef
 rolebinding
 rolebindings
 rollout
 rpc
 sc
CHANGELOG.md (45 lines added)

@@ -1,5 +1,50 @@
 # Changelog

+## [0.9.0](https://github.com/cloudnative-pg/plugin-barman-cloud/compare/v0.8.0...v0.9.0) (2025-11-06)
+
+### Features
+
+* **deps:** Update barman-cloud to v3.16.2 ([#609](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/609)) ([1ef53d0](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/1ef53d03416481152fc5f861bc6144e9621919d8))
+
+### Bug Fixes
+
+* **deps:** Update module github.com/onsi/ginkgo/v2 to v2.27.2 ([#635](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/635)) ([57e861d](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/57e861dea1757b6ee3ca55c53f3e30cbaf5ca84a))
+* **deps:** Update module sigs.k8s.io/controller-runtime to v0.22.4 ([#636](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/636)) ([33f3373](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/33f3373b60fa09b3a6c17462a63b65e31c63a1ee))
+* **docs:** Use `archiveAdditionalCommandArgs` for WAL archive configuration ([#630](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/630)) ([4aa47a7](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/4aa47a7a2e949f394a6414631967593341bf6e42)) (sketched below)
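The `archiveAdditionalCommandArgs` docs fix above concerns passing extra flags to `barman-cloud-wal-archive` through the ObjectStore resource. A minimal sketch, assuming the plugin exposes the field under `spec.configuration.wal` the same way CloudNativePG's `barmanObjectStore` section does; the names, endpoint, and example flag are illustrative and not taken from this diff:

    apiVersion: barmancloud.cnpg.io/v1
    kind: ObjectStore
    metadata:
      name: minio-store                  # hypothetical name
    spec:
      configuration:
        destinationPath: s3://backups/   # hypothetical bucket
        endpointURL: http://minio:9000   # hypothetical endpoint; credentials omitted
        wal:
          compression: gzip
          # Extra arguments appended to each barman-cloud-wal-archive call;
          # this is the field the docs fix refers to. Check the command's
          # --help for flags that actually exist in your barman version.
          archiveAdditionalCommandArgs:
            - "--read-timeout=60"        # example flag, assumption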
+## [0.8.0](https://github.com/cloudnative-pg/plugin-barman-cloud/compare/v0.7.0...v0.8.0) (2025-10-27)
+
+### ⚠ BREAKING CHANGES
+
+* **rbac:** Resource names have been prefixed to avoid cluster conflicts. All cluster-scoped and namespace-scoped resources now use the `barman-plugin-` prefix for consistency; see the [Resource Name Migration Guide](https://cloudnative-pg.io/plugin-barman-cloud/resource-name-migration/) for detailed migration instructions.
+
+### Features
+
+* **ip:** Assign copyright to the Linux Foundation ([#571](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/571)) ([1be34fe](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/1be34fe13e830a219d0d8d68423caf2d3c55a49b))
+* **rbac:** Prefix all resource names to avoid cluster conflicts ([#593](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/593)) ([c2bfe12](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/c2bfe1217e8542c80dd2b099d8d966e725e2b280)), closes [#395](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/395)
+
+### Bug Fixes
+
+* **deps,security:** Update to go 1.25.2 ([#581](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/581)) ([523bd1e](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/523bd1e2b3fb1d63ad930d15d172513eb0be7dee)), closes [#580](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/580)
+* **deps:** Lock file maintenance documentation dependencies ([#555](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/555)) ([fad3a65](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/fad3a65340870c9d1553018e760d72b3f3a8aa4d))
+* **deps:** Lock file maintenance documentation dependencies ([#612](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/612)) ([da5acb5](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/da5acb59d892670de668835d7850e4e09183e16d))
+* **deps:** Update all non-major go dependencies ([#616](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/616)) ([3a9697e](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/3a9697e69c16ca913f78278ebe0f89fa355d0726))
+* **deps:** Update k8s.io/utils digest to bc988d5 ([#559](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/559)) ([36db77c](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/36db77ced4be3f77278c8e831b7fae06c7beb3cb))
+* **deps:** Update module github.com/cert-manager/cert-manager to v1.19.0 ([#575](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/575)) ([484b280](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/484b28017e23fd5166c558c27c15103a586f068b))
+* **deps:** Update module github.com/cert-manager/cert-manager to v1.19.1 ([#600](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/600)) ([d8f78f9](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/d8f78f90d02b081ecc4a60ccc925b998f89ced00))
+* **deps:** Update module github.com/onsi/ginkgo/v2 to v2.26.0 ([#560](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/560)) ([529737f](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/529737ffa43fd4af8a9602a072f9c9eda9f3e747))
+* **deps:** Update module github.com/onsi/ginkgo/v2 to v2.27.0 ([#614](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/614)) ([6700c60](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/6700c6044603712d77597c1ec46beae59220ef3b))
+* **deps:** Update module google.golang.org/grpc to v1.76.0 ([#569](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/569)) ([e1bc0a1](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/e1bc0a1d4a4d2c08b69726ab04484b2d43c5adf1))
+* **deps:** Update module sigs.k8s.io/controller-runtime to v0.22.2 ([#568](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/568)) ([1b5955e](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/1b5955ead9b7e56c48440abd452d348bf0ec5385))
+* **deps:** Update module sigs.k8s.io/controller-runtime to v0.22.3 ([#586](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/586)) ([ea76733](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/ea7673343a2120fd9871f81688ea0bf68906444a))
+* Disable management of end-of-wal file flag during backup restoration ([#604](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/604)) ([931a06a](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/931a06a407cc4885bfcd653535a81aca37ecbd0c)), closes [#603](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/603)
+* **e2e:** Avoid pinpointing the PostgreSQL version ([#562](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/562)) ([5276dd1](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/5276dd17cfd3bea41918a69622c385756b0404cb))
+* Set LeaderElectionReleaseOnCancel to true to enable RollingUpdates ([#615](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/615)) ([49f1096](https://github.com/cloudnative-pg/plugin-barman-cloud/commit/49f1096cba74008f84435dcbb82e59f43e5ae112)), closes [#419](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/419)
+
 ## [0.7.0](https://github.com/cloudnative-pg/plugin-barman-cloud/compare/v0.6.0...v0.7.0) (2025-09-25)
Taskfile.yml (12 lines changed)

@@ -21,7 +21,7 @@ tasks:
       # renovate: datasource=git-refs depName=golangci-lint lookupName=https://github.com/sagikazarmark/daggerverse currentValue=main
       DAGGER_GOLANGCI_LINT_SHA: 6133ad18e131b891d4723b8e25d69f5de077b472
       # renovate: datasource=docker depName=golangci/golangci-lint versioning=semver
-      GOLANGCI_LINT_VERSION: v2.5.0
+      GOLANGCI_LINT_VERSION: v2.6.1
     cmds:
       - >
         GITHUB_REF= dagger -sc "github.com/sagikazarmark/daggerverse/golangci-lint@${DAGGER_GOLANGCI_LINT_SHA}
@@ -125,11 +125,11 @@ tasks:
     desc: Run go test
     env:
       # renovate: datasource=docker depName=golang versioning=semver
-      GOLANG_IMAGE_VERSION: 1.25.2
+      GOLANG_IMAGE_VERSION: 1.25.4
       # renovate: datasource=git-refs depname=kubernetes packageName=https://github.com/kubernetes/kubernetes versioning=semver
       K8S_VERSION: 1.31.0
       # renovate: datasource=git-refs depName=controller-runtime packageName=https://github.com/kubernetes-sigs/controller-runtime versioning=semver
-      SETUP_ENVTEST_VERSION: 0.22.3
+      SETUP_ENVTEST_VERSION: 0.22.4
     cmds:
       - >
         GITHUB_REF= dagger -s call -m ./dagger/gotest
@@ -202,7 +202,7 @@ tasks:
       - start-build-network
     vars:
       # renovate: datasource=github-tags depName=dagger/dagger versioning=semver
-      DAGGER_VERSION: 0.19.2
+      DAGGER_VERSION: 0.19.5
      DAGGER_ENGINE_IMAGE: registry.dagger.io/engine:v{{ .DAGGER_VERSION }}
     cmds:
       - >
@@ -302,7 +302,7 @@ tasks:
       - start-kind-cluster
     vars:
       # renovate: datasource=docker depName=golang versioning=semver
-      GOLANG_IMAGE_VERSION: 1.25.2
+      GOLANG_IMAGE_VERSION: 1.25.4
     KUBECONFIG_PATH:
       sh: mktemp -t kubeconfig-XXXXX
     env:
@@ -321,7 +321,7 @@ tasks:
      - build-images
     vars:
       # renovate: datasource=docker depName=golang versioning=semver
-      GOLANG_IMAGE_VERSION: 1.25.2
+      GOLANG_IMAGE_VERSION: 1.25.4
     env:
       _EXPERIMENTAL_DAGGER_RUNNER_HOST: docker-container://{{ .DAGGER_ENGINE_CONTAINER_NAME }}
     cmds:
(kustomization.yaml)

@@ -6,7 +6,7 @@ namespace: plugin-barman-cloud-system
 # "wordpress" becomes "alices-wordpress".
 # Note that it should also match with the prefix (text before '-') of the namespace
 # field above.
-namePrefix: plugin-barman-cloud-
+#namePrefix: plugin-barman-cloud-

 # Labels to add to all resources and selectors.
 #labels:
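The `namePrefix` directive disabled above is kustomize's mechanism for rewriting every resource name at build time; after the 0.8.0 rename the `barman-plugin-` prefix is baked into each manifest directly, so the kustomize-level prefix is no longer applied. A minimal sketch of what the directive does, reusing the example from the file's own comment (the file names here are illustrative):

    # kustomization.yaml (hypothetical example)
    namePrefix: alices-
    resources:
      - deployment.yaml   # declares `metadata.name: wordpress`
    # `kustomize build` then emits the Deployment named `alices-wordpress`,
    # the exact rewrite the comment in the hunk above describes.

The RBAC diffs that follow are the result of that decision: each resource name gains a hard-coded `barman-plugin-` prefix instead.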
@@ -5,7 +5,7 @@ metadata:
   labels:
     app.kubernetes.io/name: plugin-barman-cloud
     app.kubernetes.io/managed-by: kustomize
-  name: leader-election-role
+  name: barman-plugin-leader-election-role
 rules:
 - apiGroups:
   - ""
@@ -4,11 +4,11 @@ metadata:
   labels:
     app.kubernetes.io/name: plugin-barman-cloud
     app.kubernetes.io/managed-by: kustomize
-  name: leader-election-rolebinding
+  name: barman-plugin-leader-election-rolebinding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
-  name: leader-election-role
+  name: barman-plugin-leader-election-role
 subjects:
 - kind: ServiceAccount
   name: plugin-barman-cloud
@@ -1,7 +1,7 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: metrics-auth-role
+  name: barman-plugin-metrics-auth-role
 rules:
 - apiGroups:
   - authentication.k8s.io
@@ -1,11 +1,11 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: metrics-auth-rolebinding
+  name: barman-plugin-metrics-auth-rolebinding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: metrics-auth-role
+  name: barman-plugin-metrics-auth-role
 subjects:
 - kind: ServiceAccount
   name: plugin-barman-cloud
@@ -1,7 +1,7 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: metrics-reader
+  name: barman-plugin-metrics-reader
 rules:
 - nonResourceURLs:
   - "/metrics"
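These renames are the 0.8.0 breaking change in practice: any RBAC object that referenced the old names must now point at the `barman-plugin-` ones. A minimal, hypothetical binding that grants the renamed metrics-reader role to a monitoring ServiceAccount; the subject's name and namespace are assumptions, and only the `roleRef` name comes from the hunk above:

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: prometheus-barman-plugin-metrics    # hypothetical binding name
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: barman-plugin-metrics-reader        # renamed role from the diff above
    subjects:
      - kind: ServiceAccount
        name: prometheus-k8s                    # hypothetical monitoring SA
        namespace: monitoring                   # hypothetical namespace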
@@ -5,7 +5,7 @@ metadata:
   labels:
     app.kubernetes.io/name: plugin-barman-cloud
     app.kubernetes.io/managed-by: kustomize
-  name: objectstore-editor-role
+  name: barman-plugin-objectstore-editor-role
 rules:
 - apiGroups:
   - barmancloud.cnpg.io
@@ -5,7 +5,7 @@ metadata:
   labels:
     app.kubernetes.io/name: plugin-barman-cloud
     app.kubernetes.io/managed-by: kustomize
-  name: objectstore-viewer-role
+  name: barman-plugin-objectstore-viewer-role
 rules:
 - apiGroups:
   - barmancloud.cnpg.io
Dockerfile (manager build stage)

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.25.2 AS gobuilder
+FROM --platform=$BUILDPLATFORM golang:1.25.4 AS gobuilder
 ARG TARGETOS
 ARG TARGETARCH
Sidecar Dockerfile (two hunks)

@@ -5,7 +5,7 @@
 # Both components are built before going into a distroless container

 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.25.2 AS gobuilder
+FROM --platform=$BUILDPLATFORM golang:1.25.4 AS gobuilder
 ARG TARGETOS
 ARG TARGETARCH
@@ -36,7 +36,7 @@ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache
 # Use plugin-barman-cloud-base to get the dependencies.
 # pip will build everything inside /usr, so we copy every file into a new
 # destination that will then be copied into the distroless container
-FROM ghcr.io/cloudnative-pg/plugin-barman-cloud-base:3.14.1-202508210758 AS pythonbuilder
+FROM ghcr.io/cloudnative-pg/plugin-barman-cloud-base:3.16.2-202511051806 AS pythonbuilder
 # Prepare a new /usr/ directory with the files we'll need in the final image
 RUN mkdir /new-usr/ && \
     cp -r --parents /usr/local/lib/ /usr/lib/*-linux-gnu/ /usr/local/bin/ \
sidecar-requirements.in

@@ -1,2 +1,3 @@
-barman[azure,cloud,google,snappy,zstandard,lz4]==3.16.0
+barman[azure,cloud,google,snappy,zstandard,lz4]==3.16.2
 setuptools==80.9.0
+zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability
sidecar-requirements.txt (pip-compile lockfile; each bumped package also gets a fresh set of `--hash=sha256:` pins, elided below; the scrape ends part-way through the lz4 4.4.5 hash list)

 #
 # pip-compile --allow-unsafe --generate-hashes --output-file=sidecar-requirements.txt --strip-extras sidecar-requirements.in
 #
-azure-core==1.35.1
+azure-core==1.36.0
     # via
     #   azure-identity
     #   azure-storage-blob
-azure-identity==1.25.0
+azure-identity==1.25.1
     # via barman
-azure-storage-blob==12.26.0
+azure-storage-blob==12.27.1
     # via barman
-barman==3.16.0
+barman==3.16.2
     # via -r sidecar-requirements.in
-boto3==1.40.35
+boto3==1.40.66
     # via barman
-botocore==1.40.35
+botocore==1.40.66
     # via
     #   boto3
     #   s3transfer
-cachetools==5.5.2
+cachetools==6.2.1
     # via google-auth
-certifi==2025.8.3
+certifi==2025.10.5
     # via requests
 cffi==2.0.0
     # via cryptography
-charset-normalizer==3.4.3
+charset-normalizer==3.4.4
     # via requests
 cramjam==2.11.0
     # via
     #   barman
     #   python-snappy
-cryptography==46.0.1
+cryptography==46.0.3
     # via
     #   azure-identity
     #   azure-storage-blob
     #   msal
     #   pyjwt
-google-api-core==2.25.1
+google-api-core==2.28.1
     # via
     #   google-cloud-core
     #   google-cloud-storage
-google-auth==2.40.3
+google-auth==2.42.1
     # via
     #   google-api-core
     #   google-cloud-core
     #   google-cloud-storage
-google-cloud-core==2.4.3
+google-cloud-core==2.5.0
     # via google-cloud-storage
-google-cloud-storage==3.4.0
+google-cloud-storage==3.5.0
     # via barman
 google-crc32c==1.7.1
 google-resumable-media==2.7.2
     # via google-cloud-storage
-googleapis-common-protos==1.70.0
+googleapis-common-protos==1.71.0
     # via google-api-core
-idna==3.10
+idna==3.11
     # via requests
 isodate==0.7.2
 jmespath==1.0.1
     # via
     #   boto3
     #   botocore
-lz4==4.4.4
+lz4==4.4.5
|
||||
--hash=sha256:e928ec2d84dc8d13285b4a9288fd6246c5cde4f5f935b479f50d986911f085e3 \
|
||||
--hash=sha256:f32b9e65d70f3684532358255dc053f143835c5f5991e28a5ac4c93ce94b9ea7 \
|
||||
--hash=sha256:f6538aaaedd091d6e5abdaa19b99e6e82697d67518f114721b5248709b639fad \
|
||||
--hash=sha256:f9b8bde9909a010c75b3aea58ec3910393b758f3c219beed67063693df854db0 \
|
||||
--hash=sha256:ff1b50aeeec64df5603f17984e4b5be6166058dcf8f1e26a3da40d7a0f6ab547
|
||||
# via barman
|
||||
msal==1.33.0 \
|
||||
--hash=sha256:836ad80faa3e25a7d71015c990ce61f704a87328b1e73bcbb0623a18cbf17510 \
|
||||
--hash=sha256:c0cd41cecf8eaed733ee7e3be9e040291eba53b0f262d3ae9c58f38b04244273
|
||||
msal==1.34.0 \
|
||||
--hash=sha256:76ba83b716ea5a6d75b0279c0ac353a0e05b820ca1f6682c0eb7f45190c43c2f \
|
||||
--hash=sha256:f669b1644e4950115da7a176441b0e13ec2975c29528d8b9e81316023676d6e1
|
||||
# via
|
||||
# azure-identity
|
||||
# msal-extensions
|
||||
@ -542,31 +592,29 @@ proto-plus==1.26.1 \
|
||||
--hash=sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66 \
|
||||
--hash=sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012
|
||||
# via google-api-core
|
||||
protobuf==6.32.1 \
|
||||
--hash=sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346 \
|
||||
--hash=sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4 \
|
||||
--hash=sha256:68ff170bac18c8178f130d1ccb94700cf72852298e016a2443bdb9502279e5f1 \
|
||||
--hash=sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085 \
|
||||
--hash=sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1 \
|
||||
--hash=sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710 \
|
||||
--hash=sha256:d0975d0b2f3e6957111aa3935d08a0eb7e006b1505d825f862a1fffc8348e122 \
|
||||
--hash=sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281 \
|
||||
--hash=sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d
|
||||
protobuf==6.33.0 \
|
||||
--hash=sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954 \
|
||||
--hash=sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995 \
|
||||
--hash=sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef \
|
||||
--hash=sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455 \
|
||||
--hash=sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee \
|
||||
--hash=sha256:c963e86c3655af3a917962c9619e1a6b9670540351d7af9439d06064e3317cc9 \
|
||||
--hash=sha256:cd33a8e38ea3e39df66e1bbc462b076d6e5ba3a4ebbde58219d777223a7873d3 \
|
||||
--hash=sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035 \
|
||||
--hash=sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90 \
|
||||
--hash=sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298
|
||||
# via
|
||||
# google-api-core
|
||||
# googleapis-common-protos
|
||||
# proto-plus
|
||||
psycopg2==2.9.10 \
|
||||
--hash=sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4 \
|
||||
--hash=sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11 \
|
||||
--hash=sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2 \
|
||||
--hash=sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e \
|
||||
--hash=sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716 \
|
||||
--hash=sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067 \
|
||||
--hash=sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442 \
|
||||
--hash=sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2 \
|
||||
--hash=sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b \
|
||||
--hash=sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a
|
||||
psycopg2==2.9.11 \
|
||||
--hash=sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8 \
|
||||
--hash=sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb \
|
||||
--hash=sha256:6ecddcf573777536bddfefaea8079ce959287798c8f5804bee6933635d538924 \
|
||||
--hash=sha256:8dc379166b5b7d5ea66dcebf433011dfc51a7bb8a5fc12367fa05668e5fc53c8 \
|
||||
--hash=sha256:964d31caf728e217c697ff77ea69c2ba0865fa41ec20bb00f0977e62fdcc52e3 \
|
||||
--hash=sha256:e03e4a6dbe87ff81540b434f2e5dc2bddad10296db5eea7bdc995bf5f4162938 \
|
||||
--hash=sha256:f10a48acba5fe6e312b891f290b4d2ca595fc9a06850fe53320beac353575578
|
||||
# via barman
|
||||
pyasn1==0.6.1 \
|
||||
--hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \
|
||||
@ -617,9 +665,7 @@ s3transfer==0.14.0 \
|
||||
six==1.17.0 \
|
||||
--hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \
|
||||
--hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81
|
||||
# via
|
||||
# azure-core
|
||||
# python-dateutil
|
||||
# via python-dateutil
|
||||
typing-extensions==4.15.0 \
|
||||
--hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \
|
||||
--hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548
|
||||
@ -633,6 +679,10 @@ urllib3==2.5.0 \
|
||||
# via
|
||||
# botocore
|
||||
# requests
|
||||
zipp==3.23.0 \
|
||||
--hash=sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e \
|
||||
--hash=sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166
|
||||
# via -r sidecar-requirements.in
|
||||
zstandard==0.25.0 \
|
||||
--hash=sha256:011d388c76b11a0c165374ce660ce2c8efa8e5d87f34996aa80f9c0816698b64 \
|
||||
--hash=sha256:01582723b3ccd6939ab7b3a78622c573799d5d8737b534b86d0e06ac18dbde4a \
|
||||
|
||||
31
go.mod
@ -2,17 +2,17 @@ module github.com/cloudnative-pg/plugin-barman-cloud

go 1.25.0

toolchain go1.25.2
toolchain go1.25.4

require (
github.com/cert-manager/cert-manager v1.19.0
github.com/cert-manager/cert-manager v1.19.1
github.com/cloudnative-pg/api v1.27.0
github.com/cloudnative-pg/barman-cloud v0.3.3
github.com/cloudnative-pg/cloudnative-pg v1.27.0
github.com/cloudnative-pg/cloudnative-pg v1.27.1
github.com/cloudnative-pg/cnpg-i v0.3.0
github.com/cloudnative-pg/cnpg-i-machinery v0.4.1
github.com/cloudnative-pg/machinery v0.3.1
github.com/onsi/ginkgo/v2 v2.26.0
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/gomega v1.38.2
github.com/spf13/cobra v1.10.1
github.com/spf13/viper v1.21.0
@ -23,7 +23,7 @@ require (
k8s.io/apimachinery v0.34.1
k8s.io/client-go v0.34.1
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
sigs.k8s.io/controller-runtime v0.22.3
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/kustomize/api v0.20.1
sigs.k8s.io/kustomize/kyaml v0.20.1
)
@ -48,8 +48,18 @@ require (
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.22.1 // indirect
github.com/go-openapi/jsonreference v0.21.2 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-openapi/swag v0.24.1 // indirect
github.com/go-openapi/swag/cmdutils v0.24.0 // indirect
github.com/go-openapi/swag/conv v0.24.0 // indirect
github.com/go-openapi/swag/fileutils v0.24.0 // indirect
github.com/go-openapi/swag/jsonname v0.25.1 // indirect
github.com/go-openapi/swag/jsonutils v0.24.0 // indirect
github.com/go-openapi/swag/loading v0.24.0 // indirect
github.com/go-openapi/swag/mangling v0.24.0 // indirect
github.com/go-openapi/swag/netutils v0.24.0 // indirect
github.com/go-openapi/swag/stringutils v0.24.0 // indirect
github.com/go-openapi/swag/typeutils v0.24.0 // indirect
github.com/go-openapi/swag/yamlutils v0.24.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@ -76,7 +86,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 // indirect
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.1 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
@ -101,18 +111,17 @@ require (
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
golang.org/x/mod v0.27.0 // indirect
golang.org/x/net v0.44.0 // indirect
golang.org/x/oauth2 v0.31.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/term v0.35.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.36.0 // indirect
64
go.sum
@ -12,16 +12,16 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cert-manager/cert-manager v1.19.0 h1:pPKtK06zPriliwq7B12xa7DGKbc4SvnfOjE0BXaAspk=
github.com/cert-manager/cert-manager v1.19.0/go.mod h1:7VBp/ihp5Xgz/ZJuGy1fNIWbDouQKBonvEyWoJLuhmA=
github.com/cert-manager/cert-manager v1.19.1 h1:Txh8L/nLWTDcb7ZnXuXbTe15BxQnLbLirXmbNk0fGgY=
github.com/cert-manager/cert-manager v1.19.1/go.mod h1:8Ps1VXCQRGKT8zNvLQlhDK1gFKWmYKdIPQFmvTS2JeA=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudnative-pg/api v1.27.0 h1:uSUkF9X/0UZu1Xn5qI33qHVmzZrDKuuyoiRlsOmSTv4=
github.com/cloudnative-pg/api v1.27.0/go.mod h1:IWyAmuirffHiw6iIGD1p18BmZNb13TK9Os/wkp8ltDg=
github.com/cloudnative-pg/barman-cloud v0.3.3 h1:EEcjeV+IUivDpmyF/H/XGY1pGaKJ5LS5MYeB6wgGcak=
github.com/cloudnative-pg/barman-cloud v0.3.3/go.mod h1:5CM4MncAxAjnqxjDt0I5E/oVd7gsMLL0/o/wQ+vUSgs=
github.com/cloudnative-pg/cloudnative-pg v1.27.0 h1:mp0zlYO4vKAJmQdArWORWKr1lmB4Hj8JeBQGX5RBnpg=
github.com/cloudnative-pg/cloudnative-pg v1.27.0/go.mod h1:aIVMy0BrfXg+eJuA/ZyRGyOQtPtshw/sCzQnQn3KFUo=
github.com/cloudnative-pg/cloudnative-pg v1.27.1 h1:w+bbtXyEPoaa7sZGXxbb8qJ+/bUGWQ3M48kbNUEpKlk=
github.com/cloudnative-pg/cloudnative-pg v1.27.1/go.mod h1:XbwCAlCm5fr+/A+v+qvMp8DHzVtJr2m0Y/TpKALw+Bk=
github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE=
github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g=
github.com/cloudnative-pg/cnpg-i-machinery v0.4.1 h1:M4ELhZO0e7UjiV3PZ3uGJOz3SgG/3ThWiqkbrFCcXNE=
@ -51,8 +51,8 @@ github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BN
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.14 h1:3fAqdB6BCPKHDMHAKRwtPUwYexKtGrNuw8HX/T/4neo=
github.com/gkampitakis/go-snaps v0.5.14/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-faker/faker/v4 v4.4.1 h1:LY1jDgjVkBZWIhATCt+gkl0x9i/7wC61gZx73GTFb+Q=
@ -68,10 +68,30 @@ github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo
github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8=
github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A=
github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I=
github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8=
github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik=
github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c=
github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak=
github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90=
github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts=
github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0=
github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc=
github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk=
github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk=
github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc=
github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w=
github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM=
github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM=
github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w=
github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw=
github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI=
github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c=
github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
@ -145,8 +165,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE=
github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
@ -156,10 +176,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 h1:NEQAo0Cl1gf9sJ3oI1QzczS3BF6ySvSSH36mwOZDuhI=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.1 h1:j/GvU9UxlK5nuUKOWYOY0LRqcfHZl1ffTOa46+00Cys=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.1/go.mod h1:nPk0OteXBkbT0CRCa2oZQL1jRLW6RJ2fuIijHypeJdk=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
@ -243,16 +261,14 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -280,10 +296,10 @@ golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
@ -343,8 +359,8 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/gateway-api v1.4.0 h1:ZwlNM6zOHq0h3WUX2gfByPs2yAEsy/EenYJB78jpQfQ=
sigs.k8s.io/gateway-api v1.4.0/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
@ -428,7 +428,14 @@ func isStreamingAvailable(cluster *cnpgv1.Cluster, podName string) bool {
return false
}

// Easy case: If this pod is a replica, the streaming is always available
// Easy case take 1: we are helping PostgreSQL to create the first
// instance of a Cluster. No streaming connection is possible.
if cluster.Status.CurrentPrimary == "" {
return false
}

// Easy case take 2: If this pod is a replica, the streaming is always
// available
if cluster.Status.CurrentPrimary != podName {
return true
}

@ -43,7 +43,7 @@ const (
// Data is the metadata of this plugin.
var Data = identity.GetPluginMetadataResponse{
Name: PluginName,
Version: "0.7.0", // x-release-please-version
Version: "0.9.0", // x-release-please-version
DisplayName: "BarmanCloudInstance",
ProjectUrl: "https://github.com/cloudnative-pg/plugin-barman-cloud",
RepositoryUrl: "https://github.com/cloudnative-pg/plugin-barman-cloud",

@ -119,7 +119,7 @@ func Start(ctx context.Context) error {
// the manager stops, so would be fine to enable this option. However,
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to start manager")

@ -158,7 +158,6 @@ func (impl JobHookImpl) Restore(
config := getRestoreWalConfig()

contextLogger.Info("sending restore response", "config", config)
contextLogger.Trace("sending restore response", "env", env)
return &restore.RestoreResponse{
RestoreConfig: config,
Envs: nil,
@ -707,7 +707,7 @@ metadata:
labels:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/name: plugin-barman-cloud
name: leader-election-role
name: barman-plugin-leader-election-role
namespace: cnpg-system
rules:
- apiGroups:
@ -745,7 +745,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-auth-role
name: barman-plugin-metrics-auth-role
rules:
- apiGroups:
- authentication.k8s.io
@ -763,7 +763,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
name: barman-plugin-metrics-reader
rules:
- nonResourceURLs:
- /metrics
@ -776,7 +776,7 @@ metadata:
labels:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/name: plugin-barman-cloud
name: objectstore-editor-role
name: barman-plugin-objectstore-editor-role
rules:
- apiGroups:
- barmancloud.cnpg.io
@ -803,7 +803,7 @@ metadata:
labels:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/name: plugin-barman-cloud
name: objectstore-viewer-role
name: barman-plugin-objectstore-viewer-role
rules:
- apiGroups:
- barmancloud.cnpg.io
@ -894,12 +894,12 @@ metadata:
labels:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/name: plugin-barman-cloud
name: leader-election-rolebinding
name: barman-plugin-leader-election-rolebinding
namespace: cnpg-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
name: barman-plugin-leader-election-role
subjects:
- kind: ServiceAccount
name: plugin-barman-cloud
@ -908,11 +908,11 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-auth-rolebinding
name: barman-plugin-metrics-auth-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metrics-auth-role
name: barman-plugin-metrics-auth-role
subjects:
- kind: ServiceAccount
name: plugin-barman-cloud
@ -257,3 +257,18 @@ spec:
barmanObjectName: minio-eu
serverName: pg-eu
```

## Step 5: Verify your metrics

When migrating from the in-core solution to the plugin-based approach, you need
to monitor a different set of metrics, as described in the
["Observability"](observability.md) section.

The table below summarizes the name changes between the old in-core metrics and
the new plugin-based ones:

| Old metric name | New metric name |
| ------------------------------------------------ | ---------------------------------------------------------------- |
| `cnpg_collector_last_failed_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp` |
| `cnpg_collector_last_available_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp` |
| `cnpg_collector_first_recoverability_point` | `barman_cloud_cloudnative_pg_io_first_recoverability_point` |
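
If you have dashboards or alerting rules that reference the in-core names,
updating them is a direct substitution. As a sketch (the surrounding PromQL
expression is hypothetical), a query measuring backup age would change as
follows:

```
# before (in-core metrics)
time() - cnpg_collector_last_available_backup_timestamp

# after (plugin-based metrics)
time() - barman_cloud_cloudnative_pg_io_last_available_backup_timestamp
```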
@ -39,7 +39,7 @@ You can pass additional command-line arguments to `barman-cloud-backup` and
`ObjectStore` configuration.

- `.spec.configuration.data.additionalCommandArgs`: for `barman-cloud-backup`
- `.spec.configuration.wal.additionalCommandArgs`: for `barman-cloud-wal-archive`
- `.spec.configuration.wal.archiveAdditionalCommandArgs`: for `barman-cloud-wal-archive`

Each field accepts a list of string arguments. If an argument is already
configured elsewhere in the plugin, the duplicate will be ignored.
@ -67,7 +67,7 @@ metadata:
spec:
configuration:
wal:
additionalCommandArgs:
archiveAdditionalCommandArgs:
- "--max-concurrency=1"
- "--read-timeout=60"
```
24
web/docs/observability.md
Normal file
@ -0,0 +1,24 @@
---
sidebar_position: 55
---

# Observability

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin exposes the following metrics through the native
Prometheus exporter of the instance manager:

- `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`:
  the UNIX timestamp of the most recent failed backup.

- `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`:
  the UNIX timestamp of the most recent successful backup available.

- `barman_cloud_cloudnative_pg_io_first_recoverability_point`:
  the UNIX timestamp of the earliest point in time from which the
  cluster can be recovered.

These metrics supersede the previously available in-core metrics, which used
the `cnpg_collector` prefix; the new metrics are exposed under the
`barman_cloud_cloudnative_pg_io` prefix instead.
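
As an illustration, these metrics can feed standard Prometheus alerting. The
following is a minimal sketch only (the rule group and alert names are
hypothetical, and the `for` duration should match your own recovery
objectives):

```yaml
groups:
  - name: barman-cloud-backups        # hypothetical rule group name
    rules:
      - alert: BarmanCloudLastBackupFailed
        # Fires when the most recent backup attempt failed after the last
        # successful backup became available.
        expr: >
          barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp
          > barman_cloud_cloudnative_pg_io_last_available_backup_timestamp
        for: 15m
        labels:
          severity: warning
```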
219
web/docs/resource-name-migration.md
Normal file
@ -0,0 +1,219 @@
---
sidebar_position: 90
---

# Resource name migration guide

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

:::warning
Before proceeding with the migration process, please:
1. **Read this guide in its entirety** to understand what changes will be made
2. **Test in a non-production environment** first if possible
3. **Ensure you have proper backups** of your cluster configuration

This migration will delete old RBAC resources only after the
`plugin-barman-cloud` upgrade. While the operation is designed to be safe, you
should review and understand the changes before proceeding. The maintainers of
this project are not responsible for any issues that may arise during
migration.

**Note:** This guide assumes you are using the default `cnpg-system` namespace.
:::

## Overview

Starting from version **0.8.0**, the `plugin-barman-cloud` deployment manifests
use more specific, prefixed resource names to avoid conflicts with other
components deployed in the same Kubernetes cluster.

## What Changed

The following resources have been renamed to use proper prefixes.

### Cluster-scoped Resources

| Old Name | New Name |
|----------------------------|------------------------------------------|
| `metrics-auth-role` | `barman-plugin-metrics-auth-role` |
| `metrics-auth-rolebinding` | `barman-plugin-metrics-auth-rolebinding` |
| `metrics-reader` | `barman-plugin-metrics-reader` |
| `objectstore-viewer-role` | `barman-plugin-objectstore-viewer-role` |
| `objectstore-editor-role` | `barman-plugin-objectstore-editor-role` |

### Namespace-scoped Resources

| Old Name | New Name | Namespace |
|-------------------------------|---------------------------------------------|---------------|
| `leader-election-role` | `barman-plugin-leader-election-role` | `cnpg-system` |
| `leader-election-rolebinding` | `barman-plugin-leader-election-rolebinding` | `cnpg-system` |

## Why This Change?

Using generic names for cluster-wide resources is discouraged, as they may
conflict with other components deployed in the same cluster. The new names make
it clear that these resources belong to the Barman Cloud plugin and help avoid
naming collisions.

## Migration Instructions

This three-step migration process is straightforward and can be completed with
a few `kubectl` commands.

### Step 1: Upgrade plugin-barman-cloud

Please refer to the [Installation](installation.mdx) section to deploy the new
`plugin-barman-cloud` release.

### Step 2: Delete Old Cluster-scoped Resources

:::danger Verify Resources Before Deletion
**IMPORTANT**: The old resource names are generic and could potentially belong
to other components in your cluster.

**Before deleting each resource, verify it belongs to the Barman Cloud plugin
by checking:**
- For `objectstore-*` roles: Look for `barmancloud.cnpg.io` in the API groups
- For `metrics-*` roles: Check if they reference the `plugin-barman-cloud`
  ServiceAccount in the `cnpg-system` namespace
- For other roles: Look for labels like `app.kubernetes.io/name: plugin-barman-cloud`

If a resource doesn't have these indicators, **DO NOT DELETE IT**, as it may
belong to another application.

Carefully review the output of each verification command before proceeding with
the `delete`.
:::

:::tip Dry Run First
You can add `--dry-run=client` to any `kubectl delete` command to preview what
would be deleted without actually removing anything.
:::
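
For instance, to preview the effect on one of the roles covered in this guide:

```bash
# Shows what would be deleted, without changing anything in the cluster
kubectl delete clusterrole objectstore-viewer-role --dry-run=client
```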
**Only proceed if you've verified these resources belong to the Barman Cloud
plugin (see warning above).**

For each resource below, first verify it belongs to Barman Cloud, then delete
it:

```bash
# 1. Check metrics-auth-rolebinding FIRST (we'll check the role after)
# Look for references to the plugin-barman-cloud ServiceAccount
kubectl describe clusterrolebinding metrics-auth-rolebinding
# If it references the plugin-barman-cloud ServiceAccount in the cnpg-system
# namespace, delete it:
kubectl delete clusterrolebinding metrics-auth-rolebinding

# 2. Check metrics-auth-role
# Look for references to authentication.k8s.io and authorization.k8s.io
kubectl describe clusterrole metrics-auth-role
# Verify it's not being used by any other rolebindings:
kubectl get clusterrolebinding -o json \
  | jq -r '.items[] | select(.roleRef.name=="metrics-auth-role") | .metadata.name'
# If the above returns nothing (role is not in use) and the role looks like the
# Barman Cloud one, delete it (see warnings section):
kubectl delete clusterrole metrics-auth-role

# 3. Check objectstore-viewer-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-viewer-role
# If it shows barmancloud.cnpg.io in API groups, delete it:
kubectl delete clusterrole objectstore-viewer-role

# 4. Check objectstore-editor-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-editor-role
# If it shows barmancloud.cnpg.io in API groups, delete it:
kubectl delete clusterrole objectstore-editor-role

# 5. Check metrics-reader (MOST DANGEROUS - very generic name)
# First, check if it's being used by any rolebindings OTHER than barman's:
kubectl get clusterrolebinding -o json \
  | jq -r '.items[]
      | select(.roleRef.name=="metrics-reader")
      | "\(.metadata.name) -> \(.subjects[0].name) in \(.subjects[0].namespace)"'
# If this shows ANY rolebindings, review them carefully. Only proceed if
# they're all Barman-related. Then check the role itself:
kubectl describe clusterrole metrics-reader
# If it ONLY has nonResourceURLs: /metrics and NO other rolebindings use it,
# delete it:
kubectl delete clusterrole metrics-reader
```

:::warning
The `metrics-reader` role is particularly dangerous to delete blindly. Many
monitoring systems use this exact name. Only delete it if:

1. You've verified it ONLY grants access to `/metrics`
2. No other rolebindings reference it (checked with the jq command above)
3. You're certain it was created by the Barman Cloud plugin

If you're unsure, it's safer to leave it and let the new
`barman-plugin-metrics-reader` role coexist with it.
:::

If any resource is not found during the `describe` command, that's okay - it
means it was never created or already deleted. Simply skip the delete command
for that resource.

### Step 3: Delete Old Namespace-scoped Resources

Delete the old namespace-scoped resources in the `cnpg-system` namespace:

```bash
# Delete the old leader-election resources
kubectl delete role leader-election-role -n cnpg-system
kubectl delete rolebinding leader-election-rolebinding -n cnpg-system
```

If any resource is not found, that's okay - it means it was never created or
already deleted.

## Impact

- **Permissions:** If you have custom RBAC rules or tools that reference the
  old resource names, they will need to be updated.
- **External Users:** If end users have been granted the
  `objectstore-viewer-role` or `objectstore-editor-role`, they will need to be
  granted the new roles (`barman-plugin-objectstore-viewer-role` and
  `barman-plugin-objectstore-editor-role`), as sketched below.
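
For example, a user who previously held the old viewer role through a
ClusterRoleBinding can be re-granted it against the new name (a sketch only;
the binding name and user are hypothetical):

```bash
kubectl create clusterrolebinding alice-objectstore-viewer \
  --clusterrole=barman-plugin-objectstore-viewer-role \
  --user=alice
```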
## Verification

After migration, verify that the new resources are created:

```bash
# Check cluster-scoped resources
kubectl get clusterrole | grep barman
kubectl get clusterrolebinding | grep barman

# Check namespace-scoped resources
kubectl get role,rolebinding -n cnpg-system | grep barman
```

You should see the new prefixed resource names.

## Troubleshooting

### Plugin Not Starting After Migration

If the plugin fails to start after migration, check:

1. **ServiceAccount permissions:** Ensure the `plugin-barman-cloud` ServiceAccount is bound to the new roles:

   ```bash
   kubectl get clusterrolebinding barman-plugin-metrics-auth-rolebinding -o yaml
   kubectl get rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system -o yaml
   ```

2. **Role references:** Verify that the rolebindings reference the correct role names:

   ```bash
   kubectl describe rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system
   kubectl describe clusterrolebinding barman-plugin-metrics-auth-rolebinding
   ```

## Support

If you encounter issues during migration, please open an issue on the [GitHub
repository](https://github.com/cloudnative-pg/plugin-barman-cloud/issues).
16
web/docs/upgrades.mdx
Normal file
@ -0,0 +1,16 @@
---
sidebar_position: 25
---

# Upgrades

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

You can upgrade the plugin simply by installing the new version. Unless
explicitly stated below or in the release notes, no special steps are required.

## Upgrading to version 0.8.x from previous versions

Version **0.8.0** introduces breaking changes to resource naming.
To complete the upgrade successfully, follow the instructions in the
["Resource name migration guide"](resource-name-migration.md).
@ -15,8 +15,8 @@
"typecheck": "tsc"
},
"dependencies": {
"@docusaurus/core": "3.9.1",
"@docusaurus/preset-classic": "3.9.1",
"@docusaurus/core": "3.9.2",
"@docusaurus/preset-classic": "3.9.2",
"@easyops-cn/docusaurus-search-local": "^0.52.0",
"@mdx-js/react": "^3.0.0",
"clsx": "^2.0.0",
@ -25,9 +25,9 @@
"react-dom": "^19.0.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.9.1",
"@docusaurus/tsconfig": "3.9.1",
"@docusaurus/types": "3.9.1",
"@docusaurus/module-type-aliases": "3.9.2",
"@docusaurus/tsconfig": "3.9.2",
"@docusaurus/types": "3.9.2",
"typescript": "~5.9.0"
},
"browserslist": {
43
web/versioned_docs/version-0.8.0/compression.md
Normal file
@ -0,0 +1,43 @@
---
sidebar_position: 80
---

# Compression

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

By default, backups and WAL files are archived **uncompressed**. However, the
Barman Cloud Plugin supports multiple compression algorithms via
`barman-cloud-backup` and `barman-cloud-wal-archive`, allowing you to optimize
for space, speed, or a balance of both.

### Supported Compression Algorithms

- `bzip2`
- `gzip`
- `lz4` (WAL only)
- `snappy`
- `xz` (WAL only)
- `zstd` (WAL only)

Compression settings for base backups and WAL archives are configured
independently. For implementation details, refer to the corresponding API
definitions:

- [`DataBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration)
- [`WALBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration)

:::important
Compression impacts both performance and storage efficiency. Choose the right
algorithm based on your recovery time objectives (RTO), storage capacity, and
network throughput.
:::
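
As a minimal sketch (the resource name, bucket path, and chosen algorithms
below are hypothetical; see the API definitions above for the authoritative
field list), compression is selected independently for WAL archiving and base
backups in the `ObjectStore` configuration:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    destinationPath: s3://my-bucket/
    wal:
      compression: zstd     # applied by barman-cloud-wal-archive
    data:
      compression: snappy   # applied by barman-cloud-backup
```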
## Compression Benchmark (on MinIO)

| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed Size (MB) | Compressed Size (MB) | Ratio |
| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ----- |
| None | 10,927 | 7,553 | 395 | 395 | 1.0:1 |
| bzip2 | 25,404 | 13,886 | 395 | 67 | 5.9:1 |
| gzip | 116,281 | 3,077 | 395 | 91 | 4.3:1 |
| snappy | 8,134 | 8,341 | 395 | 166 | 2.4:1 |
177
web/versioned_docs/version-0.8.0/concepts.md
Normal file
@ -0,0 +1,177 @@
---
sidebar_position: 10
---

# Main Concepts

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

:::important
Before proceeding, make sure to review the following sections of the
CloudNativePG documentation:

- [**Backup**](https://cloudnative-pg.io/documentation/current/backup/)
- [**WAL Archiving**](https://cloudnative-pg.io/documentation/current/wal_archiving/)
- [**Recovery**](https://cloudnative-pg.io/documentation/current/recovery/)
:::

The **Barman Cloud Plugin** enables **hot (online) backups** of PostgreSQL
clusters in CloudNativePG through [`barman-cloud`](https://pgbarman.org),
supporting continuous physical backups and WAL archiving to an **object
store**—without interrupting write operations.

It also supports both **full recovery** and **Point-in-Time Recovery (PITR)**
of a PostgreSQL cluster.

## The Object Store

At the core is the [`ObjectStore` custom resource (CRD)](plugin-barman-cloud.v1.md#objectstorespec),
which acts as the interface between the PostgreSQL cluster and the target
object storage system. It allows you to configure the following (see the
sketch after this list):

- **Authentication and bucket location** via the `.spec.configuration` section
- **WAL archiving** settings—such as compression type, parallelism, and
  server-side encryption—under `.spec.configuration.wal`
- **Base backup options**—with similar settings for compression, concurrency,
  and encryption—under `.spec.configuration.data`
- **Retention policies** to manage the life-cycle of archived WALs and backups
  via `.spec.configuration.retentionPolicy`
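
A minimal sketch of such a resource follows (the bucket path, Secret name, and
keys are hypothetical placeholders; the API reference linked below documents
the full schema):

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: cluster-example
spec:
  configuration:
    destinationPath: s3://backups/cluster-example
    s3Credentials:
      accessKeyId:
        name: aws-creds            # hypothetical Secret holding S3 credentials
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: aws-creds
        key: ACCESS_SECRET_KEY
    retentionPolicy: "30d"
```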
WAL files are archived in the `wals` directory, while base backups are stored
as **tarballs** in the `base` directory, following the
[Barman Cloud convention](https://docs.pgbarman.org/cloud/latest/usage/#object-store-layout).

The plugin also offers advanced capabilities, including
[backup tagging](misc.md#backup-object-tagging) and
[extra options for backups and WAL archiving](misc.md#extra-options-for-backup-and-wal-archiving).

:::tip
For details, refer to the
[API reference for the `ObjectStore` resource](plugin-barman-cloud.v1.md#objectstorespec).
:::

## Integration with a CloudNativePG Cluster

CloudNativePG can delegate continuous backup and recovery responsibilities to
the **Barman Cloud Plugin** by configuring the `.spec.plugins` section of a
`Cluster` resource. This setup requires a corresponding `ObjectStore` resource
to be defined.

:::important
While it is technically possible to reuse the same `ObjectStore` for multiple
`Cluster` resources within the same namespace, it is strongly recommended to
dedicate one object store per PostgreSQL cluster to ensure data isolation and
operational clarity.
:::

The following example demonstrates how to configure a CloudNativePG cluster
named `cluster-example` to use a previously defined `ObjectStore` (also named
`cluster-example`) in the same namespace. Setting `isWALArchiver: true` enables
WAL archiving through the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  # Other cluster settings...
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: cluster-example
```

## Backup of a Postgres Cluster

Once the object store is defined and the `Cluster` is configured to use the
Barman Cloud Plugin, **WAL archiving is activated immediately** on the
PostgreSQL primary.

Physical base backups are seamlessly managed by CloudNativePG using the
`Backup` and `ScheduledBackup` resources, respectively for
[on-demand](https://cloudnative-pg.io/documentation/current/backup/#on-demand-backups)
and
[scheduled](https://cloudnative-pg.io/documentation/current/backup/#scheduled-backups)
backups.

To use the Barman Cloud Plugin, you must set the `method` to `plugin` and
configure the `pluginConfiguration` section as shown:

```yaml
[...]
spec:
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
[...]
```
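
Putting this together, a complete on-demand `Backup` manifest would look
roughly like the following sketch (the resource name is hypothetical, and the
target cluster is assumed to be the `cluster-example` defined earlier):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```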
With this configuration, CloudNativePG supports:

- Backups from both **primary** and **standby** instances
- Backups from **designated primaries** in a distributed topology using
  [replica clusters](https://cloudnative-pg.io/documentation/current/replica_cluster/)

:::tip
For details on how to back up from a standby, refer to the official documentation:
[Backup from a standby](https://cloudnative-pg.io/documentation/current/backup/#backup-from-a-standby).
:::

:::important
Both backup and WAL archiving operations are executed by sidecar containers
running in the same pod as the PostgreSQL `Cluster` primary instance—except
when backups are taken from a standby, in which case the sidecar runs alongside
the standby pod.
The sidecar containers use a [dedicated container image](images.md) that
includes only the supported version of Barman Cloud.
:::

## Recovery of a Postgres Cluster

In PostgreSQL, *recovery* refers to the process of starting a database instance
from an existing backup. The Barman Cloud Plugin integrates with CloudNativePG
to support both **full recovery** and **Point-in-Time Recovery (PITR)** from an
object store.

Recovery in this context is *not in-place*: it bootstraps a brand-new
PostgreSQL cluster from a backup and replays the necessary WAL files to reach
the desired recovery target.

To perform a recovery, define an *external cluster* that references the
appropriate `ObjectStore`, and use it as the source in the `bootstrap` section
of the target cluster:

```yaml
[...]
spec:
  [...]
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: cluster-example
          serverName: cluster-example
[...]
```

The critical element here is the `externalClusters` section of the `Cluster`
resource, where the `plugin` stanza instructs CloudNativePG to use the Barman
Cloud Plugin to access the object store for recovery.

This same mechanism can be used for a variety of scenarios enabled by the
CloudNativePG API, including:

* **Full cluster recovery** from the latest backup
* **Point-in-Time Recovery (PITR)**
* Bootstrapping **replica clusters** in a distributed topology

:::tip
For complete instructions and advanced use cases, refer to the official
[Recovery documentation](https://cloudnative-pg.io/documentation/current/recovery/).
:::
37
web/versioned_docs/version-0.8.0/images.md
Normal file
@ -0,0 +1,37 @@
---
sidebar_position: 99
---

# Container Images

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin is distributed using two container images:

- One for deploying the plugin components
- One for the sidecar that runs alongside each PostgreSQL instance in a
  CloudNativePG `Cluster` using the plugin

## Plugin Container Image

The plugin image contains the logic required to operate the Barman Cloud Plugin
within your Kubernetes environment with CloudNativePG. It is published on the
GitHub Container Registry at `ghcr.io/cloudnative-pg/plugin-barman-cloud`.

This image is built from the
[`Dockerfile.plugin`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.plugin)
in the plugin repository.

## Sidecar Container Image

The sidecar image is used within each PostgreSQL pod in the cluster. It
includes the latest supported version of Barman Cloud and is responsible for
performing WAL archiving and backups on behalf of CloudNativePG.

It is available at `ghcr.io/cloudnative-pg/plugin-barman-cloud-sidecar` and is
built from the
[`Dockerfile.sidecar`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.sidecar).

These sidecar images are designed to work seamlessly with the
[`minimal` PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images)
maintained by the CloudNativePG Community.
109
web/versioned_docs/version-0.8.0/installation.mdx
Normal file
@ -0,0 +1,109 @@
---
sidebar_position: 20
---

# Installation

:::important
1. The plugin **must** be installed in the same namespace as the CloudNativePG
   operator (typically `cnpg-system`).

2. Keep in mind that the operator's **listening namespaces** may differ from its
   installation namespace. Double-check this to avoid configuration issues.
:::

## Verifying the Requirements

Before installing the plugin, make sure the [requirements](intro.md#requirements) are met.

### CloudNativePG Version

Ensure you're running a version of CloudNativePG that is compatible with the
plugin. If installed in the default `cnpg-system` namespace, you can verify the
version with:

```sh
kubectl get deployment -n cnpg-system cnpg-controller-manager \
  -o jsonpath="{.spec.template.spec.containers[*].image}"
```

Example output:

```output
ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0
```

The version **must be 1.26 or newer**.

### cert-manager

Use the [cmctl](https://cert-manager.io/docs/reference/cmctl/#installation)
tool to confirm that `cert-manager` is installed and available:

```sh
cmctl check api
```

Example output:

```output
The cert-manager API is ready
```

Both checks are required before proceeding with the installation.

## Installing the Barman Cloud Plugin

import { InstallationSnippet } from '@site/src/components/Installation';

Install the plugin using `kubectl` by applying the manifest for the latest
release:

<InstallationSnippet />

Example output:

```output
customresourcedefinition.apiextensions.k8s.io/objectstores.barmancloud.cnpg.io created
serviceaccount/plugin-barman-cloud created
role.rbac.authorization.k8s.io/leader-election-role created
clusterrole.rbac.authorization.k8s.io/metrics-auth-role created
clusterrole.rbac.authorization.k8s.io/metrics-reader created
clusterrole.rbac.authorization.k8s.io/objectstore-editor-role created
clusterrole.rbac.authorization.k8s.io/objectstore-viewer-role created
clusterrole.rbac.authorization.k8s.io/plugin-barman-cloud created
rolebinding.rbac.authorization.k8s.io/leader-election-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/metrics-auth-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/plugin-barman-cloud-binding created
secret/plugin-barman-cloud-8tfddg42gf created
service/barman-cloud created
deployment.apps/barman-cloud configured
certificate.cert-manager.io/barman-cloud-client created
certificate.cert-manager.io/barman-cloud-server created
issuer.cert-manager.io/selfsigned-issuer created
```

Finally, check that the deployment is up and running:

```sh
kubectl rollout status deployment \
  -n cnpg-system barman-cloud
```

Example output:

```output
deployment "barman-cloud" successfully rolled out
```

This confirms that the plugin is deployed and ready to use.

## Testing the latest development snapshot

You can also test the latest development snapshot of the plugin with the
following command:

```sh
kubectl apply -f \
  https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
```
86
web/versioned_docs/version-0.8.0/intro.md
Normal file
@ -0,0 +1,86 @@
---
sidebar_position: 1
sidebar_label: "Introduction"
---

# Barman Cloud Plugin

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The **Barman Cloud Plugin** for [CloudNativePG](https://cloudnative-pg.io/)
enables online continuous physical backups of PostgreSQL clusters to object storage
using the `barman-cloud` suite from the [Barman](https://docs.pgbarman.org/release/latest/)
project.

:::important
If you plan to migrate your existing CloudNativePG cluster to the new
plugin-based approach using the Barman Cloud Plugin, see
["Migrating from Built-in CloudNativePG Backup"](migration.md)
for detailed instructions.
:::

## Requirements

Before using the Barman Cloud Plugin, ensure that the following components are
installed and properly configured:

- [CloudNativePG](https://cloudnative-pg.io) version 1.26 or later

  - We strongly recommend version 1.27.0 or later, which includes improved
    error handling and status reporting for the plugin.
  - If you are running an earlier release, refer to the
    [upgrade guide](https://cloudnative-pg.io/documentation/current/installation_upgrade).

- [cert-manager](https://cert-manager.io/)

  - The recommended way to enable secure TLS communication between the plugin
    and the operator.
  - Alternatively, you can provide your own certificate bundles. See the
    [CloudNativePG documentation on TLS configuration](https://cloudnative-pg.io/documentation/current/cnpg_i/#configuring-tls-certificates).

- [`kubectl-cnpg`](https://cloudnative-pg.io/documentation/current/kubectl-plugin/)
  plugin (optional but recommended)

  - Simplifies debugging and monitoring with additional status and inspection
    commands.
  - Multiple installation options are available in the
    [installation guide](https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install).

## Key Features

This plugin provides the following capabilities:

- Physical online backup of the data directory
- Physical restore of the data directory
- Write-Ahead Log (WAL) archiving
- WAL restore
- Full cluster recovery
- Point-in-Time Recovery (PITR)
- Seamless integration with replica clusters for bootstrap and WAL restore from archive

:::important
The Barman Cloud Plugin is designed to **replace the in-tree object storage support**
previously provided via the `.spec.backup.barmanObjectStore` section in the
`Cluster` resource.
Backups created using the in-tree approach are fully supported and compatible
with this plugin.
:::

## Supported Object Storage Providers

The plugin works with all storage backends supported by `barman-cloud`, including:

- **Amazon S3**
- **Google Cloud Storage**
- **Microsoft Azure Blob Storage**

In addition, the following S3-compatible and simulator solutions have been
tested and verified:

- [MinIO](https://min.io/) – An S3-compatible storage solution
- [Azurite](https://github.com/Azure/Azurite) – A simulator for Azure Blob Storage
- [fake-gcs-server](https://github.com/fsouza/fake-gcs-server) – A simulator for Google Cloud Storage

:::tip
For more details, refer to [Object Store Providers](object_stores.md).
:::
274
web/versioned_docs/version-0.8.0/migration.md
Normal file
@ -0,0 +1,274 @@
---
sidebar_position: 40
---

# Migrating from Built-in CloudNativePG Backup

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The in-tree support for Barman Cloud in CloudNativePG is **deprecated starting
from version 1.26** and will be removed in a future release.

If you're currently relying on the built-in Barman Cloud integration, you can
migrate seamlessly to the new **plugin-based architecture** using the Barman
Cloud Plugin, without data loss. Follow these steps:

- [Install the Barman Cloud Plugin](installation.mdx)
- Create an `ObjectStore` resource by translating the contents of the
  `.spec.backup.barmanObjectStore` section from your existing `Cluster`
  definition
- Modify the `Cluster` resource in a single atomic change to switch from
  in-tree backup to the plugin
- Update any `ScheduledBackup` resources to use the plugin
- Update the `externalClusters` configuration, where applicable

:::tip
For a working example, refer to [this commit](https://github.com/cloudnative-pg/cnpg-playground/commit/596f30e252896edf8f734991c3538df87630f6f7)
from the [CloudNativePG Playground project](https://github.com/cloudnative-pg/cnpg-playground),
which demonstrates a full migration.
:::

---

## Step 1: Define the `ObjectStore`

Begin by creating an `ObjectStore` resource in the same namespace as your
PostgreSQL `Cluster`.

There is a **direct mapping** between the `.spec.backup.barmanObjectStore`
section in CloudNativePG and the `.spec.configuration` field in the
`ObjectStore` CR. The conversion is mostly mechanical, with one key difference:

:::warning
In the plugin architecture, retention policies are defined as part of the `ObjectStore`.
In contrast, the in-tree implementation defined them at the `Cluster` level.
:::

If your `Cluster` used `.spec.backup.retentionPolicy`, move that configuration
to `.spec.retentionPolicy` in the `ObjectStore`.

---

### Example

Here’s an excerpt from a traditional in-tree CloudNativePG backup configuration
taken from the CloudNativePG Playground project:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-eu
spec:
  # [...]
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/
      endpointURL: http://minio-eu:9000
      s3Credentials:
        accessKeyId:
          name: minio-eu
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: minio-eu
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
```

This configuration translates to the following `ObjectStore` resource for the
plugin:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-eu
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio-eu:9000
    s3Credentials:
      accessKeyId:
        name: minio-eu
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio-eu
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```

As you can see, the contents of `barmanObjectStore` have been copied directly
under the `configuration` field of the `ObjectStore` resource, using the same
secret references.

## Step 2: Update the `Cluster` for plugin WAL archiving

Once the `ObjectStore` resource is in place, update the `Cluster` resource as
follows in a single atomic change:

- Remove the `.spec.backup.barmanObjectStore` section
- Remove `.spec.backup.retentionPolicy` if it was defined (as it is now in the
  `ObjectStore`)
- Remove the entire `spec.backup` section if it is now empty
- Add `barman-cloud.cloudnative-pg.io` to the `plugins` list, as described in
  [Configuring WAL archiving](usage.md#configuring-wal-archiving)

This will trigger a rolling update of the `Cluster`, switching continuous
backup from the in-tree implementation to the plugin-based approach.

### Example

The updated `pg-eu` cluster will have this configuration instead of the
previous `backup` section:

```yaml
plugins:
  - name: barman-cloud.cloudnative-pg.io
    isWALArchiver: true
    parameters:
      barmanObjectName: minio-eu
```
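
To follow the rolling update triggered by this change, you can watch the
cluster status. This is a minimal sketch, assuming the `pg-eu` cluster from
the example and the optional `kubectl-cnpg` plugin:

```sh
# Watch the Cluster resource as instances are restarted one by one
kubectl get cluster pg-eu -w

# Or, with the kubectl-cnpg plugin installed:
kubectl cnpg status pg-eu
```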

---

## Step 3: Update the `ScheduledBackup`

After switching the `Cluster` to use the plugin, update your `ScheduledBackup`
resources to match.

Set the backup `method` to `plugin` and reference the plugin name via
`pluginConfiguration`, as shown in ["Performing a base backup"](usage.md#performing-a-base-backup).

### Example

Original in-tree `ScheduledBackup`:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: pg-eu-backup
spec:
  cluster:
    name: pg-eu
  schedule: '0 0 0 * * *'
  backupOwnerReference: self
```

Updated version using the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: pg-eu-backup
spec:
  cluster:
    name: pg-eu
  schedule: '0 0 0 * * *'
  backupOwnerReference: self
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```

---

## Step 4: Update the `externalClusters` configuration

If your `Cluster` relies on one or more external clusters that use the in-tree
Barman Cloud integration, you need to update those configurations to use the
plugin-based architecture.

When a replica cluster fetches WAL files or base backups from an external
source that used the built-in backup method, follow these steps:

1. Create a corresponding `ObjectStore` resource for the external cluster, as
   shown in [Step 1](#step-1-define-the-objectstore)
2. Update the `externalClusters` section of your replica cluster to use the
   plugin instead of the in-tree `barmanObjectStore` field

### Example

Consider the original configuration using in-tree Barman Cloud:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-us
spec:
  # [...]
  externalClusters:
    - name: pg-eu
      barmanObjectStore:
        destinationPath: s3://backups/
        endpointURL: http://minio-eu:9000
        serverName: pg-eu
        s3Credentials:
          accessKeyId:
            name: minio-eu
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: minio-eu
            key: ACCESS_SECRET_KEY
        wal:
          compression: gzip
```

Create the `ObjectStore` resource for the external cluster:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-eu
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio-eu:9000
    s3Credentials:
      accessKeyId:
        name: minio-eu
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio-eu
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```

Update the external cluster configuration to use the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-us
spec:
  # [...]
  externalClusters:
    - name: pg-eu
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-eu
          serverName: pg-eu
```

## Step 5: Verify your metrics

When migrating from the in-core solution to the plugin-based approach, you need
to monitor a different set of metrics, as described in the
["Observability"](observability.md) section.

The table below summarizes the name changes between the old in-core metrics and
the new plugin-based ones:

| Old metric name | New metric name |
| ------------------------------------------------ | ---------------------------------------------------------------- |
| `cnpg_collector_last_failed_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp` |
| `cnpg_collector_last_available_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp` |
| `cnpg_collector_first_recoverability_point` | `barman_cloud_cloudnative_pg_io_first_recoverability_point` |
76
web/versioned_docs/version-0.8.0/misc.md
Normal file
@ -0,0 +1,76 @@
---
sidebar_position: 90
---

# Miscellaneous

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

## Backup Object Tagging

You can attach key-value metadata tags to backup artifacts—such as base
backups, WAL files, and history files—via the `.spec.configuration` section of
the `ObjectStore` resource.

- `tags`: applied to base backups and WAL files
- `historyTags`: applied to history files only

### Example

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    [...]
    tags:
      backupRetentionPolicy: "expire"
    historyTags:
      backupRetentionPolicy: "keep"
    [...]
```

## Extra Options for Backup and WAL Archiving

You can pass additional command-line arguments to `barman-cloud-backup` and
`barman-cloud-wal-archive` using the `additionalCommandArgs` field in the
`ObjectStore` configuration.

- `.spec.configuration.data.additionalCommandArgs`: for `barman-cloud-backup`
- `.spec.configuration.wal.additionalCommandArgs`: for `barman-cloud-wal-archive`

Each field accepts a list of string arguments. If an argument is already
configured elsewhere in the plugin, the duplicate will be ignored.

### Example: Extra Backup Options

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    data:
      additionalCommandArgs:
        - "--min-chunk-size=5MB"
        - "--read-timeout=60"
```

### Example: Extra WAL Archive Options

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    wal:
      additionalCommandArgs:
        - "--max-concurrency=1"
        - "--read-timeout=60"
```

For a complete list of supported options, refer to the
[official Barman Cloud documentation](https://docs.pgbarman.org/release/latest/).
454
web/versioned_docs/version-0.8.0/object_stores.md
Normal file
@ -0,0 +1,454 @@
---
sidebar_position: 50
---

# Object Store Providers

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin enables the storage of PostgreSQL cluster backup files
in any object storage service supported by the
[Barman Cloud infrastructure](https://docs.pgbarman.org/release/latest/).

Currently, Barman Cloud supports the following providers:

- [Amazon S3](#aws-s3)
- [Microsoft Azure Blob Storage](#azure-blob-storage)
- [Google Cloud Storage](#google-cloud-storage)

You may also use any S3- or Azure-compatible implementation of the above
services.

To configure object storage with Barman Cloud, you must define an
[`ObjectStore` object](plugin-barman-cloud.v1.md#objectstore), which
establishes the connection between your PostgreSQL cluster and the object
storage backend.

Configuration details — particularly around authentication — will vary depending on
the specific object storage provider you are using.

The following sections detail the setup for each.

---

## AWS S3

[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is one of the
most widely adopted object storage solutions.

The Barman Cloud plugin for CloudNativePG integrates with S3 through two
primary authentication mechanisms:

- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) —
  recommended for clusters running on EKS
- Access keys — using `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials

### Access Keys

To authenticate using access keys, you’ll need:

- `ACCESS_KEY_ID`: the public key used to authenticate to S3
- `ACCESS_SECRET_KEY`: the corresponding secret key
- `ACCESS_SESSION_TOKEN`: (optional) a temporary session token, if required

These credentials must be stored securely in a Kubernetes secret:

```sh
kubectl create secret generic aws-creds \
  --from-literal=ACCESS_KEY_ID=<access key here> \
  --from-literal=ACCESS_SECRET_KEY=<secret key here>
# --from-literal=ACCESS_SESSION_TOKEN=<session token here> # if required
```

The credentials will be encrypted at rest if your Kubernetes environment
supports it.

You can then reference the secret in your `ObjectStore` definition:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: aws-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/path/to/folder"
    s3Credentials:
      accessKeyId:
        name: aws-creds
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: aws-creds
        key: ACCESS_SECRET_KEY
  [...]
```

### IAM Role for Service Account (IRSA)

To use IRSA with EKS, configure the service account of the PostgreSQL cluster
with the appropriate annotation:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  [...]
spec:
  serviceAccountTemplate:
    metadata:
      annotations:
        eks.amazonaws.com/role-arn: arn:[...]
  [...]
```

### S3 Lifecycle Policy

Barman Cloud uploads backup files to S3 but does not modify or delete them afterward.
To enhance data durability and protect against accidental or malicious loss,
it's recommended to implement the following best practices, illustrated by the
sketch after this list:

- Enable object versioning
- Enable object locking to prevent objects from being deleted or overwritten
  for a defined period or indefinitely (this provides an additional layer of
  protection against accidental deletion and ransomware attacks)
- Set lifecycle rules to expire current versions a few days after your Barman
  retention window
- Expire non-current versions after a longer period

These strategies help you safeguard backups without requiring broad delete
permissions, ensuring both security and compliance with minimal operational
overhead.
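
As an illustrative sketch (not part of the plugin itself), the following AWS
CLI commands enable versioning and apply such a lifecycle policy on a
hypothetical `BUCKET_NAME` bucket, assuming a 30-day Barman retention window:

```sh
# Enable object versioning on the bucket
aws s3api put-bucket-versioning \
  --bucket BUCKET_NAME \
  --versioning-configuration Status=Enabled

# Expire current versions a few days after the 30-day retention window,
# and delete non-current versions after a longer period
aws s3api put-bucket-lifecycle-configuration \
  --bucket BUCKET_NAME \
  --lifecycle-configuration '{
    "Rules": [{
      "ID": "expire-old-backups",
      "Status": "Enabled",
      "Filter": {},
      "Expiration": { "Days": 35 },
      "NoncurrentVersionExpiration": { "NoncurrentDays": 90 }
    }]
  }'
```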

### S3-Compatible Storage Providers

You can use S3-compatible services like **MinIO**, **Linode (Akamai) Object Storage**,
or **DigitalOcean Spaces** by specifying a custom `endpointURL`.

Example with Linode (Akamai) Object Storage (`us-east1`):

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: linode-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/"
    endpointURL: "https://us-east1.linodeobjects.com"
    s3Credentials:
      [...]
  [...]
```

Recent changes to the [boto3 implementation](https://github.com/boto/boto3/issues/4392)
of [Amazon S3 Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)
may lead to the `x-amz-content-sha256` error when using the Barman Cloud
Plugin.

If you encounter this issue (see [GitHub issue #393](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/393)),
you can apply the following workaround by setting specific environment
variables in the `ObjectStore` resource:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: linode-store
spec:
  instanceSidecarConfiguration:
    env:
      - name: AWS_REQUEST_CHECKSUM_CALCULATION
        value: when_required
      - name: AWS_RESPONSE_CHECKSUM_VALIDATION
        value: when_required
  [...]
```

These settings ensure that checksum calculations and validations are only
applied when explicitly required, avoiding compatibility issues with certain
S3-compatible storage providers.

Example with DigitalOcean Spaces (SFO3, path-style):

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: digitalocean-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/path/to/folder"
    endpointURL: "https://sfo3.digitaloceanspaces.com"
    s3Credentials:
      [...]
  [...]
```

### Using Object Storage with a Private CA

For object storage services (e.g., MinIO) that use HTTPS with certificates
signed by a private CA, set the `endpointCA` field in the `ObjectStore`
definition. Unless you already have it, create a Kubernetes `Secret` with the
CA bundle:

```sh
kubectl create secret generic my-ca-secret --from-file=ca.crt
```

Then reference it:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    endpointURL: <myEndpointURL>
    endpointCA:
      name: my-ca-secret
      key: ca.crt
  [...]
```

<!-- TODO: does this also apply to the plugin? -->
:::note
If you want `ConfigMaps` and `Secrets` to be **automatically** reloaded by
instances, you can add a label with the key `cnpg.io/reload` to the
`Secrets`/`ConfigMaps`. Otherwise, you will have to reload the instances using the
`kubectl cnpg reload` subcommand.
:::

---

## Azure Blob Storage

[Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/)
is Microsoft’s cloud-based object storage solution.

Barman Cloud supports the following authentication methods:

- [Connection String](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string)
- Storage Account Name + [Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
- Storage Account Name + [SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
- [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)

### Azure AD Workload Identity

This method avoids storing credentials in Kubernetes via the
`.spec.configuration.inheritFromAzureAD` option:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: azure-store
spec:
  configuration:
    destinationPath: "<destination path here>"
    azureCredentials:
      inheritFromAzureAD: true
  [...]
```

### Access Key, SAS Token, or Connection String

Store the credentials in a Kubernetes secret:

```sh
kubectl create secret generic azure-creds \
  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
  --from-literal=AZURE_STORAGE_SAS_TOKEN=<SAS token> \
  --from-literal=AZURE_STORAGE_CONNECTION_STRING=<connection string>
```

Then reference the required keys in your `ObjectStore`:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: azure-store
spec:
  configuration:
    destinationPath: "<destination path here>"
    azureCredentials:
      connectionString:
        name: azure-creds
        key: AZURE_STORAGE_CONNECTION_STRING
      storageAccount:
        name: azure-creds
        key: AZURE_STORAGE_ACCOUNT
      storageKey:
        name: azure-creds
        key: AZURE_STORAGE_KEY
      storageSasToken:
        name: azure-creds
        key: AZURE_STORAGE_SAS_TOKEN
  [...]
```

For Azure Blob, the destination path format is:

```
<http|https>://<account-name>.<service-name>.core.windows.net/<container>/<blob>
```
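
For example, a hypothetical storage account `myaccount` with a container named
`backups` would use a destination path like the following (illustrative values
only):

```yaml
destinationPath: "https://myaccount.blob.core.windows.net/backups/pg-eu"
```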

### Azure-Compatible Providers

If you're using a different implementation (e.g., Azurite or an emulator), the
destination path format is:

```
<http|https>://<local-machine-address>:<port>/<account-name>/<container>/<blob>
```

---

## Google Cloud Storage

[Google Cloud Storage](https://cloud.google.com/storage/) is supported with two
authentication modes:

- **GKE Workload Identity** (recommended inside Google Kubernetes Engine)
- **Service Account JSON key** via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable

### GKE Workload Identity

Use [Workload Identity authentication](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
when running in GKE:

1. Set `googleCredentials.gkeEnvironment` to `true` in the `ObjectStore`
   resource
2. Annotate the `serviceAccountTemplate` in the `Cluster` resource with the GCP
   service account

For example, in the `ObjectStore` resource:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: google-store
spec:
  configuration:
    destinationPath: "gs://<bucket>/<folder>"
    googleCredentials:
      gkeEnvironment: true
```

And in the `Cluster` resource:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
spec:
  serviceAccountTemplate:
    metadata:
      annotations:
        iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com
```

### Service Account JSON Key

Follow Google’s [authentication setup](https://cloud.google.com/docs/authentication/getting-started),
then store the JSON key in a Kubernetes secret:

```sh
kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json
```

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: google-store
spec:
  configuration:
    destinationPath: "gs://<bucket>/<folder>"
    googleCredentials:
      applicationCredentials:
        name: backup-creds
        key: gcsCredentials
    [...]
```

:::important
This authentication method generates a JSON file within the container
with all the credentials required to access your Google Cloud Storage
bucket. As a result, if someone gains access to the `Pod`, they will also have
write permissions to the bucket.
:::

---

## MinIO Object Store

To use the `Tenant` resource, you first need to deploy the
[MinIO operator](https://docs.min.io/community/minio-object-store/operations/deployments/installation.html).
For the latest documentation of MinIO, please refer to the
[MinIO official documentation](https://docs.min.io/community/minio-object-store/).

MinIO Object Store's API is compatible with S3, and the default configuration of the Tenant
will create these services:

- `<tenant>-console` on port 9443 (with autocert) or 9090 (without autocert)
- `<tenant>-hl` on port 9000

Where `<tenant>` is the `metadata.name` you assigned to your Tenant resource.

:::note
The `<tenant>-console` service will only be available if you have enabled the
[MinIO Console](https://docs.min.io/community/minio-object-store/administration/minio-console.html).
:::

For example, the following Tenant:

```yml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
  name: cnpg-backups
spec:
  [...]
```

would have services called `cnpg-backups-console` and `cnpg-backups-hl` respectively.

The `console` service is for managing the tenant, while the `hl` service exposes the S3
compatible API. If your tenant is configured with `requestAutoCert`, you will communicate
with these services over HTTPS; if not, you will use HTTP.

For authentication you can use your username and password, or create an access key.
Whichever method you choose, the credentials have to be stored as a Kubernetes secret:

```sh
kubectl create secret generic minio-creds \
  --from-literal=MINIO_ACCESS_KEY=<minio access key or username> \
  --from-literal=MINIO_SECRET_KEY=<minio secret key or password>
```

Finally, create the Barman `ObjectStore`:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    destinationPath: s3://BUCKET_NAME/
    endpointURL: http://<tenant>-hl:9000
    s3Credentials:
      accessKeyId:
        name: minio-creds
        key: MINIO_ACCESS_KEY
      secretAccessKey:
        name: minio-creds
        key: MINIO_SECRET_KEY
  [...]
```

:::important
Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
proceeding with a backup; see the sketch below.
:::
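
A minimal sketch of such a check using the MinIO client (`mc`), assuming an
alias named `myminio` pointing at the tenant, and assuming the usual
Barman Cloud layout where WAL files live under the cluster's `serverName`:

```sh
# List archived WAL files for the cluster (serverName) in the bucket
mc ls myminio/BUCKET_NAME/<serverName>/wals/
```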

---
24
web/versioned_docs/version-0.8.0/observability.md
Normal file
@ -0,0 +1,24 @@
---
sidebar_position: 55
---

# Observability

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin exposes the following metrics through the native
Prometheus exporter of the instance manager:

- `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`:
  the UNIX timestamp of the most recent failed backup.

- `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`:
  the UNIX timestamp of the most recent successful backup available.

- `barman_cloud_cloudnative_pg_io_first_recoverability_point`:
  the UNIX timestamp representing the earliest point in time from which the
  cluster can be recovered.

These metrics supersede the previously available in-core metrics that used the
`cnpg_collector` prefix. The new metrics are exposed under the
`barman_cloud_cloudnative_pg_io` prefix instead.
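
To quickly confirm that the plugin metrics are being exposed, you can
port-forward to an instance pod and inspect the metrics endpoint. This is a
minimal sketch, assuming the default metrics port `9187` and a hypothetical
pod named `cluster-example-1`:

```sh
# Forward the instance manager's metrics port locally
kubectl port-forward -n <namespace> pod/cluster-example-1 9187:9187 &

# Look for the plugin's metrics in the exporter output
curl -s http://localhost:9187/metrics | grep barman_cloud_cloudnative_pg_io
```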
19
web/versioned_docs/version-0.8.0/parameters.md
Normal file
@ -0,0 +1,19 @@
---
sidebar_position: 100
---

# Parameters

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The following parameters are available for the Barman Cloud Plugin:

- `barmanObjectName`: references the `ObjectStore` resource to be used by the
  plugin.
- `serverName`: specifies the server name in the object store.

:::important
The `serverName` parameter in the `ObjectStore` resource is retained solely for
API compatibility with the in-tree `barmanObjectStore` and must always be left empty.
When needed, use the `serverName` plugin parameter in the `Cluster` configuration instead.
:::
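
As an illustration, the parameters appear in the `plugins` section of a
`Cluster`. This is a minimal sketch, assuming a hypothetical `ObjectStore`
named `my-store`; the `serverName` plugin parameter can be added under
`parameters` when the server name in the object store must differ from the
cluster name:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  # [...]
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: my-store
```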
108
web/versioned_docs/version-0.8.0/plugin-barman-cloud.v1.md
Normal file
@ -0,0 +1,108 @@
# API Reference

## Packages
- [barmancloud.cnpg.io/v1](#barmancloudcnpgiov1)

## barmancloud.cnpg.io/v1

Package v1 contains API Schema definitions for the barmancloud v1 API group

### Resource Types
- [ObjectStore](#objectstore)

#### InstanceSidecarConfiguration

InstanceSidecarConfiguration defines the configuration for the sidecar that runs in the instance pods.

_Appears in:_
- [ObjectStoreSpec](#objectstorespec)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envvar-v1-core) array_ | The environment to be explicitly passed to the sidecar | | | |
| `retentionPolicyIntervalSeconds` _integer_ | The retentionCheckInterval defines the frequency at which the<br />system checks and enforces retention policies. | | 1800 | |
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Resources define cpu/memory requests and limits for the sidecar that runs in the instance pods. | | | |
| `additionalContainerArgs` _string array_ | AdditionalContainerArgs is an optional list of command-line arguments<br />to be passed to the sidecar container when it starts.<br />The provided arguments are appended to the container’s default arguments. | | | |
| `logLevel` _string_ | The log level for PostgreSQL instances. Valid values are: `error`, `warning`, `info` (default), `debug`, `trace` | | info | Enum: [error warning info debug trace] <br /> |

#### ObjectStore

ObjectStore is the Schema for the objectstores API.

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `apiVersion` _string_ | `barmancloud.cnpg.io/v1` | True | | |
| `kind` _string_ | `ObjectStore` | True | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | True | | |
| `spec` _[ObjectStoreSpec](#objectstorespec)_ | Specification of the desired behavior of the ObjectStore.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | True | | |
| `status` _[ObjectStoreStatus](#objectstorestatus)_ | Most recently observed status of the ObjectStore. This data may not be up to<br />date. Populated by the system. Read-only.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | |

#### ObjectStoreSpec

ObjectStoreSpec defines the desired state of ObjectStore.

_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `configuration` _[BarmanObjectStoreConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration)_ | The configuration for the barman-cloud tool suite | True | | |
| `retentionPolicy` _string_ | RetentionPolicy is the retention policy to be used for backups<br />and WALs (i.e. '60d'). The retention policy is expressed in the form<br />of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -<br />days, weeks, months. | | | Pattern: `^[1-9][0-9]*[dwm]$` <br /> |
| `instanceSidecarConfiguration` _[InstanceSidecarConfiguration](#instancesidecarconfiguration)_ | The configuration for the sidecar that runs in the instance pods | | | |

#### ObjectStoreStatus

ObjectStoreStatus defines the observed state of ObjectStore.

_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `serverRecoveryWindow` _object (keys:string, values:[RecoveryWindow](#recoverywindow))_ | ServerRecoveryWindow maps each server to its recovery window | True | | |

#### RecoveryWindow

RecoveryWindow represents the time span between the first
recoverability point and the last successful backup of a PostgreSQL
server, defining the period during which data can be restored.

_Appears in:_
- [ObjectStoreStatus](#objectstorestatus)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `firstRecoverabilityPoint` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The first recoverability point in a PostgreSQL server refers to<br />the earliest point in time to which the database can be<br />restored. | True | | |
| `lastSuccessfulBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last successful backup time | True | | |
| `lastFailedBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last failed backup time | True | | |
219
web/versioned_docs/version-0.8.0/resource-name-migration.md
Normal file
@ -0,0 +1,219 @@
---
sidebar_position: 90
---

# Resource name migration guide

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

:::warning
Before proceeding with the migration process, please:
1. **Read this guide in its entirety** to understand what changes will be made
2. **Test in a non-production environment** first if possible
3. **Ensure you have proper backups** of your cluster configuration

This migration will delete old RBAC resources only after the
`plugin-barman-cloud` upgrade. While the operation is designed to be safe, you
should review and understand the changes before proceeding. The maintainers of
this project are not responsible for any issues that may arise during
migration.

**Note:** This guide assumes you are using the default `cnpg-system` namespace.
:::

## Overview

Starting from version **0.8.0**, the `plugin-barman-cloud` deployment manifests
use more specific, prefixed resource names to avoid conflicts with other
components deployed in the same Kubernetes cluster.

## What Changed

The following resources have been renamed to use proper prefixes.

### Cluster-scoped Resources

| Old Name | New Name |
|----------------------------|------------------------------------------|
| `metrics-auth-role` | `barman-plugin-metrics-auth-role` |
| `metrics-auth-rolebinding` | `barman-plugin-metrics-auth-rolebinding` |
| `metrics-reader` | `barman-plugin-metrics-reader` |
| `objectstore-viewer-role` | `barman-plugin-objectstore-viewer-role` |
| `objectstore-editor-role` | `barman-plugin-objectstore-editor-role` |

### Namespace-scoped Resources

| Old Name | New Name | Namespace |
|-------------------------------|---------------------------------------------|---------------|
| `leader-election-role` | `barman-plugin-leader-election-role` | `cnpg-system` |
| `leader-election-rolebinding` | `barman-plugin-leader-election-rolebinding` | `cnpg-system` |

## Why This Change?

Using generic names for cluster-wide resources is discouraged, as they may
conflict with other components deployed in the same cluster. The new names make
it clear that these resources belong to the Barman Cloud plugin and help avoid
naming collisions.

## Migration Instructions

This three-step migration process is straightforward and can be completed with
a few `kubectl` commands.

### Step 1: Upgrade plugin-barman-cloud

Please refer to the [Installation](installation.mdx) section to deploy the new
`plugin-barman-cloud` release.

### Step 2: Delete Old Cluster-scoped Resources

:::danger Verify Resources Before Deletion
**IMPORTANT**: The old resource names are generic and could potentially belong
to other components in your cluster.

**Before deleting each resource, verify it belongs to the Barman Cloud plugin
by checking:**
- For `objectstore-*` roles: Look for `barmancloud.cnpg.io` in the API groups
- For `metrics-*` roles: Check if they reference the `plugin-barman-cloud`
  ServiceAccount in the `cnpg-system` namespace
- For other roles: Look for labels like `app.kubernetes.io/name: plugin-barman-cloud`

If a resource doesn't have these indicators, **DO NOT DELETE IT** as it may
belong to another application.

Carefully review the output of each verification command before proceeding with
the `delete`.
:::

:::tip Dry Run First
You can add `--dry-run=client` to any `kubectl delete` command to preview what
would be deleted without actually removing anything.
:::

**Only proceed if you've verified these resources belong to the Barman Cloud
plugin (see warning above).**

For each resource below, first verify it belongs to Barman Cloud, then delete
it:

```bash
# 1. Check metrics-auth-rolebinding FIRST (we'll check the role after)
# Look for references to the plugin-barman-cloud ServiceAccount
kubectl describe clusterrolebinding metrics-auth-rolebinding
# If it references the plugin-barman-cloud ServiceAccount in the cnpg-system
# namespace, delete it:
kubectl delete clusterrolebinding metrics-auth-rolebinding

# 2. Check metrics-auth-role
# Look for references to authentication.k8s.io and authorization.k8s.io
kubectl describe clusterrole metrics-auth-role
# Verify it's not being used by any other rolebindings:
kubectl get clusterrolebinding -o json \
  | jq -r '.items[] | select(.roleRef.name=="metrics-auth-role") | .metadata.name'
# If the above returns nothing (role is not in use) and the role looks like the
# Barman Cloud one, delete it (see warnings section):
kubectl delete clusterrole metrics-auth-role

# 3. Check objectstore-viewer-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-viewer-role
# If it shows barmancloud.cnpg.io in API groups, delete it:
kubectl delete clusterrole objectstore-viewer-role

# 4. Check objectstore-editor-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-editor-role
# If it shows barmancloud.cnpg.io in API groups, delete it:
kubectl delete clusterrole objectstore-editor-role

# 5. Check metrics-reader (MOST DANGEROUS - very generic name)
# First, check if it's being used by any rolebindings OTHER than barman's:
kubectl get clusterrolebinding -o json \
  | jq -r '.items[] | select(.roleRef.name=="metrics-reader")
      | "\(.metadata.name) -> \(.subjects[0].name) in \(.subjects[0].namespace)"'
# If this shows ANY rolebindings, review them carefully. Only proceed if
# they're all Barman-related. Then check the role itself:
kubectl describe clusterrole metrics-reader
# If it ONLY has nonResourceURLs: /metrics and NO other rolebindings use it,
# delete it:
kubectl delete clusterrole metrics-reader
```

:::warning
The `metrics-reader` role is particularly dangerous to delete blindly. Many
monitoring systems use this exact name. Only delete it if:

1. You've verified it ONLY grants access to `/metrics`
2. No other rolebindings reference it (checked with the jq command above)
3. You're certain it was created by the Barman Cloud plugin

If you're unsure, it's safer to leave it and let the new
`barman-plugin-metrics-reader` role coexist with it.
:::

If any resource is not found during the `describe` command, that's okay: it
means it was never created or already deleted. Simply skip the delete command
for that resource.

### Step 3: Delete Old Namespace-scoped Resources

Delete the old namespace-scoped resources in the `cnpg-system` namespace:

```bash
# Delete the old leader-election resources
kubectl delete role leader-election-role -n cnpg-system
kubectl delete rolebinding leader-election-rolebinding -n cnpg-system
```

If any resource is not found, that's okay: it means it was never created or
already deleted.

## Impact

- **Permissions:** If you have custom RBAC rules or tools that reference the
  old resource names, they will need to be updated.
- **External Users:** If end users have been granted the
  `objectstore-viewer-role` or `objectstore-editor-role`, they will need to be
  re-granted the new role names (`barman-plugin-objectstore-viewer-role` and
  `barman-plugin-objectstore-editor-role`); see the sketch below.
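
A minimal sketch of such a re-grant, assuming a hypothetical user
`jane@example.com` who previously held the viewer role:

```bash
# Re-create the binding against the new, prefixed role name
kubectl create clusterrolebinding jane-barman-objectstore-viewer \
  --clusterrole=barman-plugin-objectstore-viewer-role \
  --user=jane@example.com
```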

## Verification

After migration, verify that the new resources are created:

```bash
# Check cluster-scoped resources
kubectl get clusterrole | grep barman
kubectl get clusterrolebinding | grep barman

# Check namespace-scoped resources
kubectl get role,rolebinding -n cnpg-system | grep barman
```

You should see the new prefixed resource names.

## Troubleshooting

### Plugin Not Starting After Migration

If the plugin fails to start after migration, check:

1. **ServiceAccount permissions:** Ensure the `plugin-barman-cloud` ServiceAccount is bound to the new roles:

   ```bash
   kubectl get clusterrolebinding barman-plugin-metrics-auth-rolebinding -o yaml
   kubectl get rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system -o yaml
   ```

2. **Role references:** Verify that the rolebindings reference the correct role names:

   ```bash
   kubectl describe rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system
   kubectl describe clusterrolebinding barman-plugin-metrics-auth-rolebinding
   ```

## Support

If you encounter issues during migration, please open an issue on the [GitHub
repository](https://github.com/cloudnative-pg/plugin-barman-cloud/issues).
38
web/versioned_docs/version-0.8.0/retention.md
Normal file
@ -0,0 +1,38 @@
---
sidebar_position: 60
---

# Retention Policies

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin supports **automated cleanup of obsolete backups** via
retention policies, configured in the `.spec.retentionPolicy` field of the
`ObjectStore` resource.

:::note
This feature uses the `barman-cloud-backup-delete` command with the
`--retention-policy "RECOVERY WINDOW OF {{ value }} {{ unit }}"` syntax.
:::

### Example: 30-Day Retention Policy

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  [...]
  retentionPolicy: "30d"
```

:::note
A **recovery window retention policy** ensures the cluster can be restored to
any point in time between the calculated *Point of Recoverability* (PoR) and
the latest WAL archive. The PoR is defined as `current time - recovery window`.
The **first valid backup** is the most recent backup completed before the PoR:
with a 30-day window, for example, that is the newest backup that completed
more than 30 days ago. Backups older than that are marked as *obsolete* and
deleted after the next backup completes.
:::
580
web/versioned_docs/version-0.8.0/troubleshooting.md
Normal file
@ -0,0 +1,580 @@
|
||||
---
|
||||
sidebar_position: 90
|
||||
---
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||
|
||||
This guide helps you diagnose and resolve common issues with the Barman Cloud
|
||||
plugin.
|
||||
|
||||
:::important
|
||||
We are continuously improving the integration between CloudNativePG and the
|
||||
Barman Cloud plugin as it moves toward greater stability and maturity. For this
|
||||
reason, we recommend using the latest available version of both components.
|
||||
See the [*Requirements* section](intro.md#requirements) for details.
|
||||
:::
|
||||
|
||||
:::note
|
||||
The following commands assume you installed the CloudNativePG operator in
|
||||
the default `cnpg-system` namespace. If you installed it in a different
|
||||
namespace, adjust the commands accordingly.
|
||||
:::
|
||||
|
||||
## Viewing Logs
|
||||
|
||||
To troubleshoot effectively, you’ll often need to review logs from multiple
|
||||
sources:
|
||||
|
||||
```sh
|
||||
# View operator logs (includes plugin interaction logs)
|
||||
kubectl logs -n cnpg-system deployment/cnpg-controller-manager -f
|
||||
|
||||
# View plugin manager logs
|
||||
kubectl logs -n cnpg-system deployment/barman-cloud -f
|
||||
|
||||
# View sidecar container logs (Barman Cloud operations)
|
||||
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud -f
|
||||
|
||||
# View all containers in a pod
|
||||
kubectl logs -n <namespace> <cluster-pod-name> --all-containers=true
|
||||
|
||||
# View previous container logs (if container restarted)
|
||||
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud --previous
|
||||
```
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Plugin Installation Issues
|
||||
|
||||
#### Plugin pods not starting
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Plugin pods stuck in `CrashLoopBackOff` or `Error`
|
||||
- Plugin deployment not ready
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
1. **Certificate issues**
|
||||
|
||||
```sh
|
||||
# Check if cert-manager is installed and running
|
||||
kubectl get pods -n cert-manager
|
||||
|
||||
# Check if the plugin certificate is created
|
||||
kubectl get certificates -n cnpg-system
|
||||
```
|
||||
|
||||
If cert-manager is not installed, install it first:
|
||||
|
||||
```sh
|
||||
# Note: other installation methods for cert-manager are available
|
||||
kubectl apply -f \
|
||||
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
|
||||
```
|
||||
|
||||
If you are using your own certificates without cert-manager, you will need
|
||||
to verify the entire certificate chain yourself.

2. **Image pull errors**

   ```sh
   # Check pod events for image pull errors
   kubectl describe pod -n cnpg-system -l app=barman-cloud
   ```

   Verify the image exists and you have proper credentials if using a private
   registry.

3. **Resource constraints**

   ```sh
   # Check node resources
   kubectl top nodes
   kubectl describe nodes
   ```

   Make sure your cluster has sufficient CPU and memory resources.

### Backup Failures

#### Quick Backup Troubleshooting Checklist

When a backup fails, follow these steps in order:

1. **Check backup status**:

   ```sh
   kubectl get backups.postgresql.cnpg.io -n <namespace>
   ```

2. **Get error details and target pod**:

   ```sh
   kubectl describe backups.postgresql.cnpg.io \
     -n <namespace> <backup-name>

   kubectl get backups.postgresql.cnpg.io \
     -n <namespace> <backup-name> \
     -o jsonpath='{.status.instanceID.podName}'
   ```

3. **Check the target pod’s sidecar logs**:

   ```sh
   TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
     -n <namespace> <backup-name> \
     -o jsonpath='{.status.instanceID.podName}')

   kubectl logs \
     -n <namespace> $TARGET_POD -c plugin-barman-cloud \
     --tail=100 | grep -E "ERROR|FATAL|panic"
   ```

4. **Check cluster events**:

   ```sh
   kubectl get events -n <namespace> \
     --field-selector involvedObject.name=<cluster-name> \
     --sort-by='.lastTimestamp'
   ```

5. **Verify plugin is running**:

   ```sh
   kubectl get pods \
     -n cnpg-system -l app=barman-cloud
   ```

6. **Check operator logs**:

   ```sh
   kubectl logs \
     -n cnpg-system deployment/cnpg-controller-manager \
     --tail=100 | grep -i "backup\|plugin"
   ```

7. **Check plugin manager logs**:

   ```sh
   kubectl logs \
     -n cnpg-system deployment/barman-cloud --tail=100
   ```

#### Backup job fails immediately

**Symptoms:**

- Backup pods terminate with error
- No backup files appear in object storage
- Backup shows `failed` phase with various error messages

**Common failure modes and solutions:**

1. **"requested plugin is not available" errors**

   ```
   requested plugin is not available: barman
   requested plugin is not available: barman-cloud
   requested plugin is not available: barman-cloud.cloudnative-pg.io
   ```

   **Cause:** The plugin name in the Cluster configuration doesn’t match the
   deployed plugin, or the plugin isn’t registered.

   **Solution:**

   a. **Check plugin registration:**

      ```sh
      # If you have the `cnpg` plugin installed (v1.27.0+)
      kubectl cnpg status -n <namespace> <cluster-name>
      ```

      Look for the "Plugins status" section:

      ```
      Plugins status
      Name                            Version  Status  Reported Operator Capabilities
      ----                            -------  ------  ------------------------------
      barman-cloud.cloudnative-pg.io  0.6.0    N/A     Reconciler Hooks, Lifecycle Service
      ```

   b. **Verify plugin name in `Cluster` spec**:

      ```yaml
      apiVersion: postgresql.cnpg.io/v1
      kind: Cluster
      spec:
        plugins:
          - name: barman-cloud.cloudnative-pg.io
            parameters:
              barmanObjectName: <your-objectstore-name>
      ```

   c. **Check plugin deployment is running**:

      ```sh
      kubectl get deployment -n cnpg-system barman-cloud
      ```

2. **"rpc error: code = Unknown desc = panic caught: assignment to entry in nil map" errors**

   **Cause:** Misconfiguration in the `ObjectStore` (e.g., a typo or missing field).

   **Solution:**

   - Review sidecar logs for details
   - Verify `ObjectStore` configuration and secrets
   - Common issues include:
     - Missing or incorrect secret references
     - Typos in configuration parameters
     - Missing required environment variables in secrets

#### Backup performance issues

**Symptoms:**

- Backups take extremely long
- Backups time out

**Plugin-specific considerations:**

1. **Check `ObjectStore` parallelism settings**
   - Adjust `maxParallel` in the `ObjectStore` configuration (see the sketch after this list)
   - Monitor sidecar container resource usage during backups

2. **Verify plugin resource allocation**
   - Check if the sidecar container has sufficient CPU/memory
   - Review plugin container logs for resource-related warnings
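
For example, WAL archiving parallelism can be raised through the
`wal.maxParallel` field of the `ObjectStore` configuration (a minimal sketch;
the value of `4` is illustrative and should be tuned to your storage and
network capacity):

```sh
kubectl patch objectstores.barmancloud.cnpg.io -n <namespace> <objectstore-name> \
  --type merge \
  -p '{"spec":{"configuration":{"wal":{"maxParallel":4}}}}'
```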

:::tip
For Barman-specific features like compression, encryption, and performance
tuning, refer to the [Barman documentation](https://docs.pgbarman.org/latest/).
:::

### WAL Archiving Issues

#### WAL archiving stops

**Symptoms:**

- WAL files accumulate on the primary
- Cluster shows WAL archiving warnings
- Sidecar logs show WAL errors

**Debugging steps:**

1. **Check plugin sidecar logs for WAL archiving errors**

   ```sh
   # Check recent WAL archive operations in sidecar
   kubectl logs -n <namespace> <primary-pod> -c plugin-barman-cloud \
     --tail=50 | grep -i wal
   ```

2. **Check ObjectStore configuration for WAL settings**
   - Ensure ObjectStore has proper WAL retention settings
   - Verify credentials have permissions for WAL operations
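
You can also confirm archiving health from PostgreSQL itself (a sketch; it
queries the standard `pg_stat_archiver` view inside the primary pod):

```sh
kubectl exec -n <namespace> <primary-pod> -c postgres -- \
  psql -c "SELECT archived_count, failed_count, last_archived_wal, last_failed_wal FROM pg_stat_archiver;"
```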

### Restore Issues

#### Restore fails during recovery

**Symptoms:**

- New cluster stuck in recovery
- Plugin sidecar shows restore errors
- PostgreSQL won’t start

**Debugging steps:**

1. **Check plugin sidecar logs during restore**

   ```sh
   # Check the sidecar logs on the recovering cluster pods
   kubectl logs -n <namespace> <cluster-pod-name> \
     -c plugin-barman-cloud --tail=100

   # Look for restore-related errors
   kubectl logs -n <namespace> <cluster-pod-name> \
     -c plugin-barman-cloud | grep -E "restore|recovery|ERROR"
   ```

2. **Verify plugin can access backups**

   ```sh
   # Check if the `ObjectStore` is properly configured for restore
   kubectl get objectstores.barmancloud.cnpg.io \
     -n <namespace> <objectstore-name> -o yaml

   # Check PostgreSQL recovery logs
   kubectl logs -n <namespace> <cluster-pod> \
     -c postgres | grep -i recovery
   ```

:::tip
For detailed Barman restore operations and troubleshooting, refer to the
[Barman documentation](https://docs.pgbarman.org/latest/barman-cloud-restore.html).
:::

#### Point-in-time recovery (PITR) configuration issues

**Symptoms:**

- PITR doesn’t reach the target time
- WAL access errors
- Recovery halts early

**Debugging steps:**

1. **Verify the PITR configuration in the `Cluster` spec**

   ```yaml
   apiVersion: postgresql.cnpg.io/v1
   kind: Cluster
   metadata:
     name: <cluster-restore-name>
   spec:
     storage:
       size: 1Gi

     bootstrap:
       recovery:
         source: origin
         recoveryTarget:
           targetTime: "2024-01-15 10:30:00"

     externalClusters:
       - name: origin
         plugin:
           enabled: true
           name: barman-cloud.cloudnative-pg.io
           parameters:
             barmanObjectName: <object-store-name>
             serverName: <source-cluster-name>
   ```

2. **Check sidecar logs for WAL-related errors**

   ```sh
   kubectl logs -n <namespace> <cluster-pod> \
     -c plugin-barman-cloud | grep -i wal
   ```

:::note
For detailed PITR configuration and WAL management, see the
[Barman PITR documentation](https://docs.pgbarman.org/latest/).
:::

### Plugin Configuration Issues

#### Plugin cannot connect to object storage

**Symptoms:**

- Sidecar logs show connection errors
- Backups fail with authentication or network errors
- `ObjectStore` resource reports errors

**Solution:**

1. **Verify `ObjectStore` CRD configuration and secrets**

   ```sh
   # Check ObjectStore resource status
   kubectl get objectstores.barmancloud.cnpg.io \
     -n <namespace> <objectstore-name> -o yaml

   # Verify the secret exists and has correct keys for your provider
   kubectl get secret -n <namespace> <secret-name> \
     -o jsonpath='{.data}' | jq 'keys'
   ```

2. **Check sidecar logs for connectivity issues**

   ```sh
   kubectl logs -n <namespace> <cluster-pod> \
     -c plugin-barman-cloud | grep -E "connect|timeout|SSL|cert"
   ```

3. **Adjust provider-specific settings (endpoint, path style, etc.)**
   - See [Object Store Configuration](object_stores.md) for provider-specific settings
   - Ensure `endpointURL` and `s3UsePathStyle` match your storage type
   - Verify network policies allow egress to your storage provider (see the probe below)
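
If you suspect a network restriction, a throwaway pod can probe egress from
inside the cluster network (a sketch; replace the endpoint with your storage
URL, and note that the image name is only an example):

```sh
kubectl run -n <namespace> net-probe --rm -it --restart=Never \
  --image=curlimages/curl -- \
  curl -sI --max-time 10 https://<storage-endpoint>
```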

## Diagnostic Commands

### Using the `cnpg` plugin for `kubectl`

The `cnpg` plugin for `kubectl` provides extended debugging capabilities.
Keep it updated:

```sh
# Install or update the `cnpg` plugin
kubectl krew install cnpg
# Or use an alternative method: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install

# Check plugin status (requires CNPG 1.27.0+)
kubectl cnpg status <cluster-name> -n <namespace>

# View cluster status in detail
kubectl cnpg status <cluster-name> -n <namespace> --verbose
```

## Getting Help

If problems persist:

1. **Check the documentation**

   - [Installation Guide](installation.mdx)
   - [Object Store Configuration](object_stores.md) (for provider-specific settings)
   - [Usage Examples](usage.md)

2. **Gather diagnostic information**

   ```sh
   # Create a diagnostic bundle (⚠️ sanitize these before sharing!)
   kubectl get objectstores.barmancloud.cnpg.io -A -o yaml > /tmp/objectstores.yaml
   kubectl get clusters.postgresql.cnpg.io -A -o yaml > /tmp/clusters.yaml
   kubectl logs -n cnpg-system deployment/barman-cloud --tail=1000 > /tmp/plugin.log
   ```

3. **Community support**

   - CloudNativePG Slack: [#cloudnativepg-users](https://cloud-native.slack.com/messages/cloudnativepg-users)
   - GitHub Issues: [plugin-barman-cloud](https://github.com/cloudnative-pg/plugin-barman-cloud/issues)

4. **Include when reporting**

   - CloudNativePG version
   - Plugin version
   - Kubernetes version
   - Cloud provider and region
   - Relevant configuration (⚠️ sanitize/redact sensitive information)
   - Error messages and logs
   - Steps to reproduce

## Known Issues and Limitations

### Current Known Issues

1. **Migration compatibility**: After migrating from in-tree backup to the
   plugin, the `kubectl cnpg backup` command syntax has changed
   ([#353](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/353)):

   ```sh
   # Old command (in-tree, no longer works after migration)
   kubectl cnpg backup -n <namespace> <cluster-name> \
     --method=barmanObjectStore

   # New command (plugin-based)
   kubectl cnpg backup -n <namespace> <cluster-name> \
     --method=plugin --plugin-name=barman-cloud.cloudnative-pg.io
   ```

### Plugin Limitations

1. **Installation method**: Currently only manifest and Kustomize installation
   are supported ([#351](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/351) -
   Helm chart requested)

2. **Sidecar resource sharing**: The plugin sidecar container shares pod
   resources with PostgreSQL

3. **Plugin restart behavior**: Restarting the sidecar container requires
   restarting the entire PostgreSQL pod
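
If a sidecar restart is needed, you can roll the cluster pods with the `cnpg`
plugin (a sketch; this triggers a rolling restart of the PostgreSQL instances):

```sh
kubectl cnpg restart <cluster-name> -n <namespace>
```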

## Recap of General Debugging Steps

### Check Backup Status and Identify the Target Instance

```sh
# List all backups and their status
kubectl get backups.postgresql.cnpg.io -n <namespace>

# Get detailed backup information including error messages and target instance
kubectl describe backups.postgresql.cnpg.io \
  -n <namespace> <backup-name>

# Extract the target pod name from a failed backup
kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='{.status.instanceID.podName}'

# Get more details including the target pod, method, phase, and error
kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='Pod: {.status.instanceID.podName}{"\n"}Method: {.status.method}{"\n"}Phase: {.status.phase}{"\n"}Error: {.status.error}{"\n"}'

# Check the cluster status for backup-related information
kubectl cnpg status <cluster-name> -n <namespace> --verbose
```

### Check Sidecar Logs on the Backup Target Pod

```sh
# Identify which pod was the backup target (from the previous step)
TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='{.status.instanceID.podName}')
echo "Backup target pod: $TARGET_POD"

# Check the sidecar logs on the specific target pod
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --tail=100

# Follow the logs in real time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud -f

# Check for specific errors in the target pod around the backup time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --since=10m | grep -E "ERROR|FATAL|panic|failed"

# Alternative: list all cluster pods and their roles
kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> \
  -o custom-columns=NAME:.metadata.name,ROLE:.metadata.labels.cnpg\\.io/instanceRole,INSTANCE:.metadata.labels.cnpg\\.io/instanceName

# Check sidecar logs on ALL cluster pods (if the target is unclear)
for pod in $(kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> -o name); do
  echo "=== Checking $pod ==="
  kubectl logs -n <namespace> $pod -c plugin-barman-cloud \
    --tail=20 | grep -i error || echo "No errors found"
done
```

### Check Events for Backup-Related Issues

```sh
# Check events for the cluster
kubectl get events -n <namespace> \
  --field-selector involvedObject.name=<cluster-name>

# Check events for failed backups
kubectl get events -n <namespace> \
  --field-selector involvedObject.kind=Backup

# Get all recent events in the namespace
kubectl get events -n <namespace> --sort-by='.lastTimestamp' | tail -20
```

### Verify `ObjectStore` Configuration

```sh
# Check the ObjectStore resource
kubectl get objectstores.barmancloud.cnpg.io \
  -n <namespace> <objectstore-name> -o yaml

# Verify the secret exists and has the correct keys
kubectl get secret -n <namespace> <secret-name> -o yaml
# Alternatively
kubectl get secret -n <namespace> <secret-name> -o jsonpath='{.data}' | jq 'keys'
```

### Common Error Messages and Solutions

* **"AccessDenied" or "403 Forbidden"** — Check cloud credentials and bucket permissions.
* **"NoSuchBucket"** — Verify the bucket exists and the endpoint URL is correct.
* **"Connection timeout"** — Check network connectivity and firewall rules.
* **"SSL certificate problem"** — For self-signed certificates, verify the CA bundle configuration (see the check below).
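
For the SSL case, you can inspect the certificate chain presented by the
object storage endpoint (a sketch; replace host and port to match your
provider):

```sh
openssl s_client -connect <storage-endpoint>:443 -showcerts </dev/null 2>/dev/null \
  | openssl x509 -noout -issuer -subject -dates
```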

16 web/versioned_docs/version-0.8.0/upgrades.mdx Normal file
@@ -0,0 +1,16 @@

---
sidebar_position: 25
---

# Upgrades

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

You can upgrade the plugin simply by installing the new version. Unless
explicitly stated below or in the release notes, no special steps are required.

## Upgrading to version 0.8.x from previous versions

Version **0.8.0** introduces breaking changes to resource naming.
To complete the upgrade successfully, follow the instructions in the
["Resource name migration guide"](resource-name-migration.md).

283 web/versioned_docs/version-0.8.0/usage.md Normal file
@@ -0,0 +1,283 @@

---
sidebar_position: 30
---

# Using the Barman Cloud Plugin

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

After [installing the plugin](installation.mdx) in the same namespace as the
CloudNativePG operator, enabling your PostgreSQL cluster to use the Barman
Cloud Plugin involves just a few steps:

- Defining the object store containing your WAL archive and base backups, using
  your preferred [provider](object_stores.md)
- Instructing the Postgres cluster to use the Barman Cloud Plugin

From that moment, you’ll be able to issue on-demand backups or define a backup
schedule, as well as rely on the object store for recovery operations.

The rest of this page details each step, using MinIO as the object store provider.

## Defining the `ObjectStore`

An `ObjectStore` resource must be created for each object store used in your
PostgreSQL architecture. Here's an example configuration using MinIO:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio:9000
    s3Credentials:
      accessKeyId:
        name: minio
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```

The `.spec.configuration` schema follows the same format as the
[in-tree barman-cloud support](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration).
Refer to [the CloudNativePG documentation](https://cloudnative-pg.io/documentation/preview/backup_barmanobjectstore/)
for additional details.

:::important
The `serverName` parameter in the `ObjectStore` resource is retained solely for
API compatibility with the in-tree `barmanObjectStore` and must always be left empty.
When needed, use the `serverName` plugin parameter in the Cluster configuration instead.
:::

## Configuring WAL Archiving

Once the `ObjectStore` is defined, you can configure your PostgreSQL cluster
to archive WALs by referencing the store in the `.spec.plugins` section:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imagePullPolicy: Always
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: minio-store
  storage:
    size: 1Gi
```

This configuration enables both WAL archiving and data directory backups.

## Performing a Base Backup

Once WAL archiving is enabled, the cluster is ready for backups. Backups can be
created either declaratively (with YAML manifests) or imperatively (with the
`cnpg` plugin).

### Declarative approach (YAML manifest)

Create a backup resource by applying a YAML manifest:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```
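
Assuming the manifest above is saved as `backup-example.yaml`, you can apply it
and follow the backup phase until completion (a sketch):

```sh
kubectl apply -n <namespace> -f backup-example.yaml
kubectl get backups.postgresql.cnpg.io -n <namespace> backup-example -w
```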

### Imperative approach (using the `cnpg` plugin)

The quickest way to trigger an on-demand backup is with the `cnpg` plugin:

```bash
kubectl cnpg backup -n <namespace> <cluster-name> \
  --method=plugin \
  --plugin-name=barman-cloud.cloudnative-pg.io
```

:::note Migration from in-tree backups
If you are migrating from the in-tree backup system, note the change in syntax:

```bash
# Old command (in-tree backup)
kubectl cnpg backup -n <namespace> <cluster-name> --method=barmanObjectStore

# New command (plugin-based backup)
kubectl cnpg backup -n <namespace> <cluster-name> \
  --method=plugin \
  --plugin-name=barman-cloud.cloudnative-pg.io
```
:::

## Restoring a Cluster

To restore a cluster from an object store, create a new `Cluster` resource that
references the store containing the backup. Below is an example configuration:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  imagePullPolicy: IfNotPresent
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store
          serverName: cluster-example
  storage:
    size: 1Gi
```

:::important
The above configuration does **not** enable WAL archiving for the restored cluster.
:::

To enable WAL archiving for the restored cluster, include the `.spec.plugins`
section alongside the `externalClusters.plugin` section, as shown below:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  imagePullPolicy: IfNotPresent
  bootstrap:
    recovery:
      source: source
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        # Backup Object Store (push, read-write)
        barmanObjectName: minio-store-bis
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          # Recovery Object Store (pull, read-only)
          barmanObjectName: minio-store
          serverName: cluster-example
  storage:
    size: 1Gi
```

The same object store may be used for both transaction log archiving and
restoring a cluster, or you can configure separate stores for these purposes.

## Configuring Replica Clusters

You can set up a distributed topology by combining the previously defined
configurations with the `.spec.replica` section. Below is an example of how to
define a replica cluster:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-dc-a
spec:
  instances: 3
  primaryUpdateStrategy: unsupervised

  storage:
    storageClass: csi-hostpath-sc
    size: 1Gi

  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: minio-store-a

  replica:
    self: cluster-dc-a
    primary: cluster-dc-a
    source: cluster-dc-b

  externalClusters:
    - name: cluster-dc-a
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store-a

    - name: cluster-dc-b
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store-b
```

## Configuring the plugin instance sidecar

The Barman Cloud Plugin runs as a sidecar container next to each PostgreSQL
instance pod. It manages backup, WAL archiving, and restore processes.

Configuration comes from multiple `ObjectStore` resources:

1. The one referenced in the `.spec.plugins` section of the `Cluster`. This is
   the object store used for WAL archiving and base backups.
2. The one referenced in the external cluster used in the
   `.spec.replica.source` section of the `Cluster`. This is used by the
   log-shipping designated primary to get the WAL files.
3. The one referenced in the `.spec.bootstrap.recovery.source` section of the
   `Cluster`. This is used by the initial recovery job to create the cluster
   from an existing backup.

You can fine-tune sidecar behavior in the `.spec.instanceSidecarConfiguration`
of your `ObjectStore`. These settings apply to all PostgreSQL instances that use
this object store. Any updates take effect at the next `Cluster` reconciliation,
and may trigger a rollout of the `Cluster`.

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    # [...]
  instanceSidecarConfiguration:
    retentionPolicyIntervalSeconds: 1800
    resources:
      requests:
        memory: "XXX"
        cpu: "YYY"
      limits:
        memory: "XXX"
        cpu: "YYY"
```

:::note
If more than one `ObjectStore` applies, the `instanceSidecarConfiguration` of
the one set in `.spec.plugins` has priority.
:::

43 web/versioned_docs/version-0.9.0/compression.md Normal file
@@ -0,0 +1,43 @@

---
sidebar_position: 80
---

# Compression

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

By default, backups and WAL files are archived **uncompressed**. However, the
Barman Cloud Plugin supports multiple compression algorithms via
`barman-cloud-backup` and `barman-cloud-wal-archive`, allowing you to optimize
for space, speed, or a balance of both.

### Supported Compression Algorithms

- `bzip2`
- `gzip`
- `lz4` (WAL only)
- `snappy`
- `xz` (WAL only)
- `zstd` (WAL only)

Compression settings for base backups and WAL archives are configured
independently. For implementation details, refer to the corresponding API
definitions:

- [`DataBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration)
- [`WALBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration)

:::important
Compression impacts both performance and storage efficiency. Choose the right
algorithm based on your recovery time objectives (RTO), storage capacity, and
network throughput.
:::
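
For example, a store that compresses WAL files with `zstd` and base backups
with `snappy` could be declared as follows (a minimal sketch; credentials and
other fields are omitted, and the store name is illustrative):

```sh
kubectl apply -f - <<EOF
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    destinationPath: s3://backups/
    # [...] credentials omitted for brevity
    wal:
      compression: zstd     # WAL archive compression
    data:
      compression: snappy   # base backup compression
EOF
```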

## Compression Benchmark (on MinIO)

| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed Size (MB) | Compressed Size (MB) | Ratio |
| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ----- |
| None        | 10,927           | 7,553             | 395                    | 395                  | 1.0:1 |
| bzip2       | 25,404           | 13,886            | 395                    | 67                   | 5.9:1 |
| gzip        | 116,281          | 3,077             | 395                    | 91                   | 4.3:1 |
| snappy      | 8,134            | 8,341             | 395                    | 166                  | 2.4:1 |

177 web/versioned_docs/version-0.9.0/concepts.md Normal file
@@ -0,0 +1,177 @@

---
sidebar_position: 10
---

# Main Concepts

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

:::important
Before proceeding, make sure to review the following sections of the
CloudNativePG documentation:

- [**Backup**](https://cloudnative-pg.io/documentation/current/backup/)
- [**WAL Archiving**](https://cloudnative-pg.io/documentation/current/wal_archiving/)
- [**Recovery**](https://cloudnative-pg.io/documentation/current/recovery/)
:::

The **Barman Cloud Plugin** enables **hot (online) backups** of PostgreSQL
clusters in CloudNativePG through [`barman-cloud`](https://pgbarman.org),
supporting continuous physical backups and WAL archiving to an **object
store**—without interrupting write operations.

It also supports both **full recovery** and **Point-in-Time Recovery (PITR)**
of a PostgreSQL cluster.

## The Object Store

At the core is the [`ObjectStore` custom resource (CRD)](plugin-barman-cloud.v1.md#objectstorespec),
which acts as the interface between the PostgreSQL cluster and the target
object storage system. It allows you to configure:

- **Authentication and bucket location** via the `.spec.configuration` section
- **WAL archiving** settings—such as compression type, parallelism, and
  server-side encryption—under `.spec.configuration.wal`
- **Base backup options**—with similar settings for compression, concurrency,
  and encryption—under `.spec.configuration.data`
- **Retention policies** to manage the life-cycle of archived WALs and backups
  via `.spec.retentionPolicy`

WAL files are archived in the `wals` directory, while base backups are stored
as **tarballs** in the `base` directory, following the
[Barman Cloud convention](https://docs.pgbarman.org/cloud/latest/usage/#object-store-layout).

The plugin also offers advanced capabilities, including
[backup tagging](misc.md#backup-object-tagging) and
[extra options for backups and WAL archiving](misc.md#extra-options-for-backup-and-wal-archiving).

:::tip
For details, refer to the
[API reference for the `ObjectStore` resource](plugin-barman-cloud.v1.md#objectstorespec).
:::

## Integration with a CloudNativePG Cluster

CloudNativePG can delegate continuous backup and recovery responsibilities to
the **Barman Cloud Plugin** by configuring the `.spec.plugins` section of a
`Cluster` resource. This setup requires a corresponding `ObjectStore` resource
to be defined.

:::important
While it is technically possible to reuse the same `ObjectStore` for multiple
`Cluster` resources within the same namespace, it is strongly recommended to
dedicate one object store per PostgreSQL cluster to ensure data isolation and
operational clarity.
:::

The following example demonstrates how to configure a CloudNativePG cluster
named `cluster-example` to use a previously defined `ObjectStore` (also named
`cluster-example`) in the same namespace. Setting `isWALArchiver: true` enables
WAL archiving through the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  # Other cluster settings...
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: cluster-example
```

## Backup of a Postgres Cluster

Once the object store is defined and the `Cluster` is configured to use the
Barman Cloud Plugin, **WAL archiving is activated immediately** on the
PostgreSQL primary.

Physical base backups are seamlessly managed by CloudNativePG using the
`Backup` and `ScheduledBackup` resources, respectively for
[on-demand](https://cloudnative-pg.io/documentation/current/backup/#on-demand-backups)
and
[scheduled](https://cloudnative-pg.io/documentation/current/backup/#scheduled-backups)
backups.

To use the Barman Cloud Plugin, you must set the `method` to `plugin` and
configure the `pluginConfiguration` section as shown:

```yaml
[...]
spec:
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
[...]
```

With this configuration, CloudNativePG supports:

- Backups from both **primary** and **standby** instances
- Backups from **designated primaries** in a distributed topology using
  [replica clusters](https://cloudnative-pg.io/documentation/current/replica_cluster/)

:::tip
For details on how to back up from a standby, refer to the official documentation:
[Backup from a standby](https://cloudnative-pg.io/documentation/current/backup/#backup-from-a-standby).
:::

:::important
Both backup and WAL archiving operations are executed by sidecar containers
running in the same pod as the PostgreSQL `Cluster` primary instance—except
when backups are taken from a standby, in which case the sidecar runs alongside
the standby pod.
The sidecar containers use a [dedicated container image](images.md) that
includes only the supported version of Barman Cloud.
:::

## Recovery of a Postgres Cluster

In PostgreSQL, *recovery* refers to the process of starting a database instance
from an existing backup. The Barman Cloud Plugin integrates with CloudNativePG
to support both **full recovery** and **Point-in-Time Recovery (PITR)** from an
object store.

Recovery in this context is *not in-place*: it bootstraps a brand-new
PostgreSQL cluster from a backup and replays the necessary WAL files to reach
the desired recovery target.

To perform a recovery, define an *external cluster* that references the
appropriate `ObjectStore`, and use it as the source in the `bootstrap` section
of the target cluster:

```yaml
[...]
spec:
  [...]
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: cluster-example
          serverName: cluster-example
  [...]
```

The critical element here is the `externalClusters` section of the `Cluster`
resource, where the `plugin` stanza instructs CloudNativePG to use the Barman
Cloud Plugin to access the object store for recovery.

This same mechanism can be used for a variety of scenarios enabled by the
CloudNativePG API, including:

* **Full cluster recovery** from the latest backup
* **Point-in-Time Recovery (PITR)**
* Bootstrapping **replica clusters** in a distributed topology

:::tip
For complete instructions and advanced use cases, refer to the official
[Recovery documentation](https://cloudnative-pg.io/documentation/current/recovery/).
:::

37 web/versioned_docs/version-0.9.0/images.md Normal file
@@ -0,0 +1,37 @@

---
sidebar_position: 99
---

# Container Images

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin is distributed using two container images:

- One for deploying the plugin components
- One for the sidecar that runs alongside each PostgreSQL instance in a
  CloudNativePG `Cluster` using the plugin

## Plugin Container Image

The plugin image contains the logic required to operate the Barman Cloud Plugin
within your Kubernetes environment with CloudNativePG. It is published on the
GitHub Container Registry at `ghcr.io/cloudnative-pg/plugin-barman-cloud`.

This image is built from the
[`Dockerfile.plugin`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.plugin)
in the plugin repository.

## Sidecar Container Image

The sidecar image is used within each PostgreSQL pod in the cluster. It
includes the latest supported version of Barman Cloud and is responsible for
performing WAL archiving and backups on behalf of CloudNativePG.

It is available at `ghcr.io/cloudnative-pg/plugin-barman-cloud-sidecar` and is
built from the
[`Dockerfile.sidecar`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.sidecar).

These sidecar images are designed to work seamlessly with the
[`minimal` PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images)
maintained by the CloudNativePG Community.

109 web/versioned_docs/version-0.9.0/installation.mdx Normal file
@@ -0,0 +1,109 @@

---
sidebar_position: 20
---

# Installation

:::important
1. The plugin **must** be installed in the same namespace as the CloudNativePG
   operator (typically `cnpg-system`).

2. Keep in mind that the operator's **listening namespaces** may differ from its
   installation namespace. Double-check this to avoid configuration issues.
:::

## Verifying the Requirements

Before installing the plugin, make sure the [requirements](intro.md#requirements) are met.

### CloudNativePG Version

Ensure you're running a version of CloudNativePG that is compatible with the
plugin. If installed in the default `cnpg-system` namespace, you can verify the
version with:

```sh
kubectl get deployment -n cnpg-system cnpg-controller-manager \
  -o jsonpath="{.spec.template.spec.containers[*].image}"
```

Example output:

```output
ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0
```

The version **must be 1.26 or newer**.

### cert-manager

Use the [cmctl](https://cert-manager.io/docs/reference/cmctl/#installation)
tool to confirm that `cert-manager` is installed and available:

```sh
cmctl check api
```

Example output:

```output
The cert-manager API is ready
```

Both checks are required before proceeding with the installation.

## Installing the Barman Cloud Plugin

import { InstallationSnippet } from '@site/src/components/Installation';

Install the plugin using `kubectl` by applying the manifest for the latest
release:

<InstallationSnippet />

Example output:

```output
customresourcedefinition.apiextensions.k8s.io/objectstores.barmancloud.cnpg.io created
serviceaccount/plugin-barman-cloud created
role.rbac.authorization.k8s.io/leader-election-role created
clusterrole.rbac.authorization.k8s.io/metrics-auth-role created
clusterrole.rbac.authorization.k8s.io/metrics-reader created
clusterrole.rbac.authorization.k8s.io/objectstore-editor-role created
clusterrole.rbac.authorization.k8s.io/objectstore-viewer-role created
clusterrole.rbac.authorization.k8s.io/plugin-barman-cloud created
rolebinding.rbac.authorization.k8s.io/leader-election-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/metrics-auth-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/plugin-barman-cloud-binding created
secret/plugin-barman-cloud-8tfddg42gf created
service/barman-cloud created
deployment.apps/barman-cloud configured
certificate.cert-manager.io/barman-cloud-client created
certificate.cert-manager.io/barman-cloud-server created
issuer.cert-manager.io/selfsigned-issuer created
```

Finally, check that the deployment is up and running:

```sh
kubectl rollout status deployment \
  -n cnpg-system barman-cloud
```

Example output:

```output
deployment "barman-cloud" successfully rolled out
```

This confirms that the plugin is deployed and ready to use.

## Testing the latest development snapshot

You can also test the latest development snapshot of the plugin with the
following command:

```sh
kubectl apply -f \
  https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
```

86 web/versioned_docs/version-0.9.0/intro.md Normal file
@@ -0,0 +1,86 @@

---
sidebar_position: 1
sidebar_label: "Introduction"
---

# Barman Cloud Plugin

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The **Barman Cloud Plugin** for [CloudNativePG](https://cloudnative-pg.io/)
enables online continuous physical backups of PostgreSQL clusters to object storage
using the `barman-cloud` suite from the [Barman](https://docs.pgbarman.org/release/latest/)
project.

:::important
If you plan to migrate your existing CloudNativePG cluster to the new
plugin-based approach using the Barman Cloud Plugin, see
["Migrating from Built-in CloudNativePG Backup"](migration.md)
for detailed instructions.
:::

## Requirements

Before using the Barman Cloud Plugin, ensure that the following components are
installed and properly configured:

- [CloudNativePG](https://cloudnative-pg.io) version 1.26 or later

  - We strongly recommend version 1.27.0 or later, which includes improved
    error handling and status reporting for the plugin.
  - If you are running an earlier release, refer to the
    [upgrade guide](https://cloudnative-pg.io/documentation/current/installation_upgrade).

- [cert-manager](https://cert-manager.io/)

  - The recommended way to enable secure TLS communication between the plugin
    and the operator.
  - Alternatively, you can provide your own certificate bundles. See the
    [CloudNativePG documentation on TLS configuration](https://cloudnative-pg.io/documentation/current/cnpg_i/#configuring-tls-certificates).

- [`kubectl-cnpg`](https://cloudnative-pg.io/documentation/current/kubectl-plugin/)
  plugin (optional but recommended)

  - Simplifies debugging and monitoring with additional status and inspection
    commands.
  - Multiple installation options are available in the
    [installation guide](https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install).

## Key Features

This plugin provides the following capabilities:

- Physical online backup of the data directory
- Physical restore of the data directory
- Write-Ahead Log (WAL) archiving
- WAL restore
- Full cluster recovery
- Point-in-Time Recovery (PITR)
- Seamless integration with replica clusters for bootstrap and WAL restore from archive

:::important
The Barman Cloud Plugin is designed to **replace the in-tree object storage support**
previously provided via the `.spec.backup.barmanObjectStore` section in the
`Cluster` resource.
Backups created using the in-tree approach are fully supported and compatible
with this plugin.
:::

## Supported Object Storage Providers

The plugin works with all storage backends supported by `barman-cloud`, including:

- **Amazon S3**
- **Google Cloud Storage**
- **Microsoft Azure Blob Storage**

In addition, the following S3-compatible and simulator solutions have been
tested and verified:

- [MinIO](https://min.io/) – An S3-compatible storage solution
- [Azurite](https://github.com/Azure/Azurite) – A simulator for Azure Blob Storage
- [fake-gcs-server](https://github.com/fsouza/fake-gcs-server) – A simulator for Google Cloud Storage

:::tip
For more details, refer to [Object Store Providers](object_stores.md).
:::

274 web/versioned_docs/version-0.9.0/migration.md Normal file
@@ -0,0 +1,274 @@

---
sidebar_position: 40
---

# Migrating from Built-in CloudNativePG Backup

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The in-tree support for Barman Cloud in CloudNativePG is **deprecated starting
from version 1.26** and will be removed in a future release.

If you're currently relying on the built-in Barman Cloud integration, you can
migrate seamlessly to the new **plugin-based architecture** using the Barman
Cloud Plugin, without data loss. Follow these steps:

- [Install the Barman Cloud Plugin](installation.mdx)
- Create an `ObjectStore` resource by translating the contents of the
  `.spec.backup.barmanObjectStore` section from your existing `Cluster`
  definition
- Modify the `Cluster` resource in a single atomic change to switch from
  in-tree backup to the plugin
- Update any `ScheduledBackup` resources to use the plugin
- Update the `externalClusters` configuration, where applicable

:::tip
For a working example, refer to [this commit](https://github.com/cloudnative-pg/cnpg-playground/commit/596f30e252896edf8f734991c3538df87630f6f7)
from the [CloudNativePG Playground project](https://github.com/cloudnative-pg/cnpg-playground),
which demonstrates a full migration.
:::

---

## Step 1: Define the `ObjectStore`

Begin by creating an `ObjectStore` resource in the same namespace as your
PostgreSQL `Cluster`.

There is a **direct mapping** between the `.spec.backup.barmanObjectStore`
section in CloudNativePG and the `.spec.configuration` field in the
`ObjectStore` CR. The conversion is mostly mechanical, with one key difference:

:::warning
In the plugin architecture, retention policies are defined as part of the `ObjectStore`.
In contrast, the in-tree implementation defined them at the `Cluster` level.
:::

If your `Cluster` used `.spec.backup.retentionPolicy`, move that configuration
to `.spec.retentionPolicy` in the `ObjectStore`.

---

### Example

Here’s an excerpt from a traditional in-tree CloudNativePG backup configuration
taken from the CloudNativePG Playground project:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-eu
spec:
  # [...]
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/
      endpointURL: http://minio-eu:9000
      s3Credentials:
        accessKeyId:
          name: minio-eu
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: minio-eu
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
```

This configuration translates to the following `ObjectStore` resource for the
plugin:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-eu
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio-eu:9000
    s3Credentials:
      accessKeyId:
        name: minio-eu
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio-eu
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```

As you can see, the contents of `barmanObjectStore` have been copied directly
under the `configuration` field of the `ObjectStore` resource, using the same
secret references.

## Step 2: Update the `Cluster` for plugin WAL archiving

Once the `ObjectStore` resource is in place, update the `Cluster` resource as
follows in a single atomic change:

- Remove the `.spec.backup.barmanObjectStore` section
- Remove `.spec.backup.retentionPolicy` if it was defined (as it is now in the
  `ObjectStore`)
- Remove the entire `spec.backup` section if it is now empty
- Add `barman-cloud.cloudnative-pg.io` to the `plugins` list, as described in
  [Configuring WAL archiving](usage.md#configuring-wal-archiving)

This will trigger a rolling update of the `Cluster`, switching continuous
backup from the in-tree implementation to the plugin-based approach.

### Example

The updated `pg-eu` cluster will have this configuration instead of the
previous `backup` section:

```yaml
plugins:
  - name: barman-cloud.cloudnative-pg.io
    isWALArchiver: true
    parameters:
      barmanObjectName: minio-eu
```

---

## Step 3: Update the `ScheduledBackup`

After switching the `Cluster` to use the plugin, update your `ScheduledBackup`
resources to match.

Set the backup `method` to `plugin` and reference the plugin name via
`pluginConfiguration`, as shown in ["Performing a base backup"](usage.md#performing-a-base-backup).

### Example

Original in-tree `ScheduledBackup`:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: pg-eu-backup
spec:
  cluster:
    name: pg-eu
  schedule: '0 0 0 * * *'
  backupOwnerReference: self
```

Updated version using the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: pg-eu-backup
spec:
  cluster:
    name: pg-eu
  schedule: '0 0 0 * * *'
  backupOwnerReference: self
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```

---

## Step 4: Update the `externalClusters` configuration

If your `Cluster` relies on one or more external clusters that use the in-tree
Barman Cloud integration, you need to update those configurations to use the
plugin-based architecture.

When a replica cluster fetches WAL files or base backups from an external
source that used the built-in backup method, follow these steps:

1. Create a corresponding `ObjectStore` resource for the external cluster, as
   shown in [Step 1](#step-1-define-the-objectstore)
2. Update the `externalClusters` section of your replica cluster to use the
   plugin instead of the in-tree `barmanObjectStore` field

### Example

Consider the original configuration using in-tree Barman Cloud:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-us
spec:
  # [...]
  externalClusters:
    - name: pg-eu
      barmanObjectStore:
        destinationPath: s3://backups/
        endpointURL: http://minio-eu:9000
        serverName: pg-eu
        s3Credentials:
          accessKeyId:
            name: minio-eu
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: minio-eu
            key: ACCESS_SECRET_KEY
        wal:
          compression: gzip
```

Create the `ObjectStore` resource for the external cluster:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-eu
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio-eu:9000
    s3Credentials:
      accessKeyId:
        name: minio-eu
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio-eu
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```

Update the external cluster configuration to use the plugin:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-us
spec:
  # [...]
  externalClusters:
    - name: pg-eu
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-eu
          serverName: pg-eu
```

## Step 5: Verify your metrics

When migrating from the in-core solution to the plugin-based approach, you need
to monitor a different set of metrics, as described in the
["Observability"](observability.md) section.

The table below summarizes the name changes between the old in-core metrics and
the new plugin-based ones:

| Old metric name                                   | New metric name                                                   |
| ------------------------------------------------- | ----------------------------------------------------------------- |
| `cnpg_collector_last_failed_backup_timestamp`     | `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`     |
| `cnpg_collector_last_available_backup_timestamp`  | `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`  |
| `cnpg_collector_first_recoverability_point`       | `barman_cloud_cloudnative_pg_io_first_recoverability_point`       |
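
To confirm the new metrics are exposed, you can scrape the metrics endpoint of
an instance pod (a sketch; it assumes the default CloudNativePG metrics port
`9187` and uses a local port-forward):

```sh
# Forward the metrics port in the background, then look for the plugin metrics
kubectl port-forward -n <namespace> <cluster-pod> 9187:9187 &
curl -s http://localhost:9187/metrics | grep barman_cloud_cloudnative_pg_io
# Stop the port-forward when done
kill %1
```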

76 web/versioned_docs/version-0.9.0/misc.md Normal file
@@ -0,0 +1,76 @@

---
sidebar_position: 90
---

# Miscellaneous

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

## Backup Object Tagging

You can attach key-value metadata tags to backup artifacts—such as base
backups, WAL files, and history files—via the `.spec.configuration` section of
the `ObjectStore` resource.

- `tags`: applied to base backups and WAL files
- `historyTags`: applied to history files only

### Example

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    [...]
    tags:
      backupRetentionPolicy: "expire"
    historyTags:
      backupRetentionPolicy: "keep"
    [...]
```

## Extra Options for Backup and WAL Archiving

You can pass additional command-line arguments to `barman-cloud-backup` and
`barman-cloud-wal-archive` using the `additionalCommandArgs` field in the
`ObjectStore` configuration.

- `.spec.configuration.data.additionalCommandArgs`: for `barman-cloud-backup`
- `.spec.configuration.wal.archiveAdditionalCommandArgs`: for `barman-cloud-wal-archive`

Each field accepts a list of string arguments. If an argument is already
configured elsewhere in the plugin, the duplicate will be ignored.

### Example: Extra Backup Options

```yaml
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    data:
      additionalCommandArgs:
        - "--min-chunk-size=5MB"
        - "--read-timeout=60"
```

### Example: Extra WAL Archive Options

```yaml
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    wal:
      archiveAdditionalCommandArgs:
        - "--max-concurrency=1"
        - "--read-timeout=60"
```

For a complete list of supported options, refer to the
[official Barman Cloud documentation](https://docs.pgbarman.org/release/latest/).

454
web/versioned_docs/version-0.9.0/object_stores.md
Normal file
@ -0,0 +1,454 @@
---
sidebar_position: 50
---

# Object Store Providers

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin enables the storage of PostgreSQL cluster backup files
in any object storage service supported by the
[Barman Cloud infrastructure](https://docs.pgbarman.org/release/latest/).

Currently, Barman Cloud supports the following providers:

- [Amazon S3](#aws-s3)
- [Microsoft Azure Blob Storage](#azure-blob-storage)
- [Google Cloud Storage](#google-cloud-storage)

You may also use any S3- or Azure-compatible implementation of the above
services.

To configure object storage with Barman Cloud, you must define an
[`ObjectStore` object](plugin-barman-cloud.v1.md#objectstore), which
establishes the connection between your PostgreSQL cluster and the object
storage backend.

Configuration details, particularly around authentication, will vary depending
on the specific object storage provider you are using.

The following sections detail the setup for each.

---

## AWS S3

[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is one of the
most widely adopted object storage solutions.

The Barman Cloud plugin for CloudNativePG integrates with S3 through two
primary authentication mechanisms:

- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html),
  recommended for clusters running on EKS
- Access keys, using `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials

### Access Keys

To authenticate using access keys, you’ll need:

- `ACCESS_KEY_ID`: the public key used to authenticate to S3
- `ACCESS_SECRET_KEY`: the corresponding secret key
- `ACCESS_SESSION_TOKEN`: (optional) a temporary session token, if required

These credentials must be stored securely in a Kubernetes secret:

```sh
kubectl create secret generic aws-creds \
  --from-literal=ACCESS_KEY_ID=<access key here> \
  --from-literal=ACCESS_SECRET_KEY=<secret key here>
# --from-literal=ACCESS_SESSION_TOKEN=<session token here> # if required
```

The credentials will be encrypted at rest if your Kubernetes environment
supports it.

You can then reference the secret in your `ObjectStore` definition:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: aws-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/path/to/folder"
    s3Credentials:
      accessKeyId:
        name: aws-creds
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: aws-creds
        key: ACCESS_SECRET_KEY
    [...]
```

### IAM Role for Service Account (IRSA)

To use IRSA with EKS, configure the service account of the PostgreSQL cluster
with the appropriate annotation:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  [...]
spec:
  serviceAccountTemplate:
    metadata:
      annotations:
        eks.amazonaws.com/role-arn: arn:[...]
    [...]
```

### S3 Lifecycle Policy

Barman Cloud uploads backup files to S3 but does not modify or delete them
afterward. To enhance data durability and protect against accidental or
malicious loss, it's recommended to implement the following best practices:

- Enable object versioning
- Enable object locking to prevent objects from being deleted or overwritten
  for a defined period or indefinitely (this provides an additional layer of
  protection against accidental deletion and ransomware attacks)
- Set lifecycle rules to expire current versions a few days after your Barman
  retention window
- Expire non-current versions after a longer period

These strategies help you safeguard backups without requiring broad delete
permissions, ensuring both security and compliance with minimal operational
overhead.
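
As an illustration, the following sketch applies the versioning and lifecycle
practices with the AWS CLI. The bucket name and the day counts are
placeholders; align them with your own Barman retention window (a 30-day
window is assumed here):

```sh
# Keep every version of uploaded objects.
aws s3api put-bucket-versioning \
  --bucket BUCKET_NAME \
  --versioning-configuration Status=Enabled

# Expire current versions a few days past the 30-day retention window,
# and purge non-current versions after a longer period.
aws s3api put-bucket-lifecycle-configuration \
  --bucket BUCKET_NAME \
  --lifecycle-configuration '{
    "Rules": [{
      "ID": "expire-obsolete-backups",
      "Status": "Enabled",
      "Filter": {"Prefix": ""},
      "Expiration": {"Days": 35},
      "NoncurrentVersionExpiration": {"NoncurrentDays": 90}
    }]
  }'
```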

### S3-Compatible Storage Providers

You can use S3-compatible services like **MinIO**, **Linode (Akamai) Object Storage**,
or **DigitalOcean Spaces** by specifying a custom `endpointURL`.

Example with Linode (Akamai) Object Storage (`us-east1`):

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: linode-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/"
    endpointURL: "https://us-east1.linodeobjects.com"
    s3Credentials:
      [...]
    [...]
```

Recent changes to the [boto3 implementation](https://github.com/boto/boto3/issues/4392)
of [Amazon S3 Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)
may lead to the `x-amz-content-sha256` error when using the Barman Cloud
Plugin.

If you encounter this issue (see [GitHub issue #393](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/393)),
you can apply the following workaround by setting specific environment
variables in the `ObjectStore` resource:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: linode-store
spec:
  instanceSidecarConfiguration:
    env:
      - name: AWS_REQUEST_CHECKSUM_CALCULATION
        value: when_required
      - name: AWS_RESPONSE_CHECKSUM_VALIDATION
        value: when_required
  [...]
```

These settings ensure that checksum calculations and validations are only
applied when explicitly required, avoiding compatibility issues with certain
S3-compatible storage providers.

Example with DigitalOcean Spaces (SFO3, path-style):

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: digitalocean-store
spec:
  configuration:
    destinationPath: "s3://BUCKET_NAME/path/to/folder"
    endpointURL: "https://sfo3.digitaloceanspaces.com"
    s3Credentials:
      [...]
    [...]
```

### Using Object Storage with a Private CA

For object storage services (e.g., MinIO) that use HTTPS with certificates
signed by a private CA, set the `endpointCA` field in the `ObjectStore`
definition. Unless you already have it, create a Kubernetes `Secret` with the
CA bundle:

```sh
kubectl create secret generic my-ca-secret --from-file=ca.crt
```

Then reference it:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    endpointURL: <myEndpointURL>
    endpointCA:
      name: my-ca-secret
      key: ca.crt
    [...]
```

<!-- TODO: does this also apply to the plugin? -->
:::note
If you want `ConfigMaps` and `Secrets` to be **automatically** reloaded by
instances, you can add a label with the key `cnpg.io/reload` to the
`Secrets`/`ConfigMaps`. Otherwise, you will have to reload the instances using
the `kubectl cnpg reload` subcommand.
:::
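
For example, to opt the CA secret above into automatic reloading (the label
value itself is not significant):

```sh
# Label the secret so instances pick up changes automatically.
kubectl label secret my-ca-secret cnpg.io/reload=true
```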

---

## Azure Blob Storage

[Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/)
is Microsoft’s cloud-based object storage solution.

Barman Cloud supports the following authentication methods:

- [Connection String](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string)
- Storage Account Name + [Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
- Storage Account Name + [SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
- [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)

### Azure AD Workload Identity

This method avoids storing credentials in Kubernetes, via the
`.spec.configuration.azureCredentials.inheritFromAzureAD` option:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: azure-store
spec:
  configuration:
    destinationPath: "<destination path here>"
    azureCredentials:
      inheritFromAzureAD: true
    [...]
```

### Access Key, SAS Token, or Connection String

Store credentials in a Kubernetes secret:

```sh
kubectl create secret generic azure-creds \
  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
  --from-literal=AZURE_STORAGE_SAS_TOKEN=<SAS token> \
  --from-literal=AZURE_STORAGE_CONNECTION_STRING=<connection string>
```

Then reference the required keys in your `ObjectStore`:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: azure-store
spec:
  configuration:
    destinationPath: "<destination path here>"
    azureCredentials:
      connectionString:
        name: azure-creds
        key: AZURE_STORAGE_CONNECTION_STRING
      storageAccount:
        name: azure-creds
        key: AZURE_STORAGE_ACCOUNT
      storageKey:
        name: azure-creds
        key: AZURE_STORAGE_KEY
      storageSasToken:
        name: azure-creds
        key: AZURE_STORAGE_SAS_TOKEN
    [...]
```

For Azure Blob, the destination path format is:

```
<http|https>://<account-name>.<service-name>.core.windows.net/<container>/<blob>
```

### Azure-Compatible Providers

If you're using a different implementation (e.g., Azurite or an emulator), use
this destination path format:

```
<http|https>://<local-machine-address>:<port>/<account-name>/<container>/<blob>
```

---

## Google Cloud Storage

[Google Cloud Storage](https://cloud.google.com/storage/) is supported with two
authentication modes:

- **GKE Workload Identity** (recommended inside Google Kubernetes Engine)
- **Service Account JSON key** via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable

### GKE Workload Identity

Use [Workload Identity authentication](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
when running in GKE:

1. Set `googleCredentials.gkeEnvironment` to `true` in the `ObjectStore`
   resource
2. Annotate the `serviceAccountTemplate` in the `Cluster` resource with the GCP
   service account

For example, in the `ObjectStore` resource:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: google-store
spec:
  configuration:
    destinationPath: "gs://<bucket>/<folder>"
    googleCredentials:
      gkeEnvironment: true
```

And in the `Cluster` resource:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
spec:
  serviceAccountTemplate:
    metadata:
      annotations:
        iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com
```
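
For the annotation above to take effect, the GCP service account must allow
the cluster's Kubernetes service account to impersonate it. A sketch of the
usual Workload Identity binding, where `GSA_NAME`, `PROJECT_ID`, `NAMESPACE`,
and `CLUSTER_SERVICE_ACCOUNT` are placeholders for your own setup:

```sh
# Allow the Kubernetes service account to act as the GCP service account.
gcloud iam service-accounts add-iam-policy-binding \
  GSA_NAME@PROJECT_ID.iam.gserviceaccount.com \
  --role roles/iam.workloadIdentityUser \
  --member "serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/CLUSTER_SERVICE_ACCOUNT]"
```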

### Service Account JSON Key

Follow Google’s [authentication setup](https://cloud.google.com/docs/authentication/getting-started),
then:

```sh
kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json
```

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: google-store
spec:
  configuration:
    destinationPath: "gs://<bucket>/<folder>"
    googleCredentials:
      applicationCredentials:
        name: backup-creds
        key: gcsCredentials
    [...]
```

:::important
This authentication method generates a JSON file within the container
with all the credentials required to access your Google Cloud Storage
bucket. As a result, if someone gains access to the `Pod`, they will also have
write permissions to the bucket.
:::

---

## MinIO Object Store

In order to use the Tenant resource, you first need to deploy the
[MinIO operator](https://docs.min.io/community/minio-object-store/operations/deployments/installation.html).
For the latest documentation of MinIO, please refer to the
[MinIO official documentation](https://docs.min.io/community/minio-object-store/).

MinIO Object Store's API is compatible with S3, and the default configuration of the Tenant
will create these services:

- `<tenant>-console` on port 9090 (without autocert) or 9443 (with autocert)
- `<tenant>-hl` on port 9000

Where `<tenant>` is the `metadata.name` you assigned to your Tenant resource.

:::note
The `<tenant>-console` service will only be available if you have enabled the
[MinIO Console](https://docs.min.io/community/minio-object-store/administration/minio-console.html).
:::

For example, the following Tenant:

```yaml
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
  name: cnpg-backups
spec:
  [...]
```

would have services called `cnpg-backups-console` and `cnpg-backups-hl`,
respectively.

The `console` service is for managing the tenant, while the `hl` service
exposes the S3-compatible API. If your tenant is configured with
`requestAutoCert`, you will communicate with these services over HTTPS;
if not, you will use HTTP.

For authentication you can use your username and password, or create an access
key. Whichever method you choose, it has to be stored as a secret:

```sh
kubectl create secret generic minio-creds \
  --from-literal=MINIO_ACCESS_KEY=<minio access key or username> \
  --from-literal=MINIO_SECRET_KEY=<minio secret key or password>
```

Finally, create the Barman `ObjectStore`:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    destinationPath: s3://BUCKET_NAME/
    endpointURL: http://<tenant>-hl:9000
    s3Credentials:
      accessKeyId:
        name: minio-creds
        key: MINIO_ACCESS_KEY
      secretAccessKey:
        name: minio-creds
        key: MINIO_SECRET_KEY
    [...]
```

:::important
Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
proceeding with a backup.
:::
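
A quick way to check is with the MinIO client (`mc`), assuming you have
configured an alias for your tenant. The alias (`myminio`), bucket, and server
names below are placeholders, and the `<serverName>/wals/` layout is the usual
structure barman-cloud creates under the destination path:

```sh
# List archived WAL segments for the cluster's server name.
mc ls --recursive myminio/BUCKET_NAME/<cluster-name>/wals/
```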

---

24
web/versioned_docs/version-0.9.0/observability.md
Normal file
@ -0,0 +1,24 @@
---
sidebar_position: 55
---

# Observability

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin exposes the following metrics through the native
Prometheus exporter of the instance manager:

- `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`:
  the UNIX timestamp of the most recent failed backup.

- `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`:
  the UNIX timestamp of the most recent successfully available backup.

- `barman_cloud_cloudnative_pg_io_first_recoverability_point`:
  the UNIX timestamp representing the earliest point in time from which the
  cluster can be recovered.

These metrics supersede the previously available in-core metrics that used the
`cnpg_collector` prefix. The new metrics are exposed under the
`barman_cloud_cloudnative_pg_io` prefix instead.
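
The same information is also tracked in the `ObjectStore` status, which
records a recovery window per server (see the
[API reference](plugin-barman-cloud.v1.md)). A quick sketch to cross-check the
metrics against it:

```sh
# Show the per-server recovery window recorded in the ObjectStore status.
kubectl get objectstores.barmancloud.cnpg.io <objectstore-name> -n <namespace> \
  -o jsonpath='{.status.serverRecoveryWindow}'
```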

19
web/versioned_docs/version-0.9.0/parameters.md
Normal file
@ -0,0 +1,19 @@
---
sidebar_position: 100
---

# Parameters

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The following parameters are available for the Barman Cloud Plugin:

- `barmanObjectName`: references the `ObjectStore` resource to be used by the
  plugin.
- `serverName`: specifies the server name in the object store.

:::important
The `serverName` parameter in the `ObjectStore` resource is retained solely for
API compatibility with the in-tree `barmanObjectStore` and must always be left empty.
When needed, use the `serverName` plugin parameter in the Cluster configuration instead.
:::

108
web/versioned_docs/version-0.9.0/plugin-barman-cloud.v1.md
Normal file
@ -0,0 +1,108 @@
# API Reference

## Packages
- [barmancloud.cnpg.io/v1](#barmancloudcnpgiov1)

## barmancloud.cnpg.io/v1

Package v1 contains API Schema definitions for the barmancloud v1 API group.

### Resource Types
- [ObjectStore](#objectstore)

#### InstanceSidecarConfiguration

InstanceSidecarConfiguration defines the configuration for the sidecar that runs in the instance pods.

_Appears in:_
- [ObjectStoreSpec](#objectstorespec)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envvar-v1-core) array_ | The environment to be explicitly passed to the sidecar | | | |
| `retentionPolicyIntervalSeconds` _integer_ | The retentionCheckInterval defines the frequency at which the<br />system checks and enforces retention policies. | | 1800 | |
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Resources define cpu/memory requests and limits for the sidecar that runs in the instance pods. | | | |
| `additionalContainerArgs` _string array_ | AdditionalContainerArgs is an optional list of command-line arguments<br />to be passed to the sidecar container when it starts.<br />The provided arguments are appended to the container’s default arguments. | | | |
| `logLevel` _string_ | The log level for PostgreSQL instances. Valid values are: `error`, `warning`, `info` (default), `debug`, `trace` | | info | Enum: [error warning info debug trace] <br /> |

#### ObjectStore

ObjectStore is the Schema for the objectstores API.

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `apiVersion` _string_ | `barmancloud.cnpg.io/v1` | True | | |
| `kind` _string_ | `ObjectStore` | True | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | True | | |
| `spec` _[ObjectStoreSpec](#objectstorespec)_ | Specification of the desired behavior of the ObjectStore.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | True | | |
| `status` _[ObjectStoreStatus](#objectstorestatus)_ | Most recently observed status of the ObjectStore. This data may not be up to<br />date. Populated by the system. Read-only.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | |

#### ObjectStoreSpec

ObjectStoreSpec defines the desired state of ObjectStore.

_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `configuration` _[BarmanObjectStoreConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration)_ | The configuration for the barman-cloud tool suite | True | | |
| `retentionPolicy` _string_ | RetentionPolicy is the retention policy to be used for backups<br />and WALs (i.e. '60d'). The retention policy is expressed in the form<br />of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -<br />days, weeks, months. | | | Pattern: `^[1-9][0-9]*[dwm]$` <br /> |
| `instanceSidecarConfiguration` _[InstanceSidecarConfiguration](#instancesidecarconfiguration)_ | The configuration for the sidecar that runs in the instance pods | | | |

#### ObjectStoreStatus

ObjectStoreStatus defines the observed state of ObjectStore.

_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `serverRecoveryWindow` _object (keys:string, values:[RecoveryWindow](#recoverywindow))_ | ServerRecoveryWindow maps each server to its recovery window | True | | |

#### RecoveryWindow

RecoveryWindow represents the time span between the first
recoverability point and the last successful backup of a PostgreSQL
server, defining the period during which data can be restored.

_Appears in:_
- [ObjectStoreStatus](#objectstorestatus)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `firstRecoverabilityPoint` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The first recoverability point in a PostgreSQL server refers to<br />the earliest point in time to which the database can be<br />restored. | True | | |
| `lastSuccessfulBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last successful backup time | True | | |
| `lastFailedBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last failed backup time | True | | |

219
web/versioned_docs/version-0.9.0/resource-name-migration.md
Normal file
@ -0,0 +1,219 @@
---
sidebar_position: 90
---

# Resource name migration guide

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

:::warning
Before proceeding with the migration process, please:

1. **Read this guide in its entirety** to understand what changes will be made
2. **Test in a non-production environment** first if possible
3. **Ensure you have proper backups** of your cluster configuration

This migration will delete old RBAC resources only after the
`plugin-barman-cloud` upgrade. While the operation is designed to be safe, you
should review and understand the changes before proceeding. The maintainers of
this project are not responsible for any issues that may arise during
migration.

**Note:** This guide assumes you are using the default `cnpg-system` namespace.
:::

## Overview

Starting from version **0.8.0**, the `plugin-barman-cloud` deployment manifests
use more specific, prefixed resource names to avoid conflicts with other
components deployed in the same Kubernetes cluster.

## What Changed

The following resources have been renamed to use proper prefixes.

### Cluster-scoped Resources

| Old Name                    | New Name                                  |
|-----------------------------|-------------------------------------------|
| `metrics-auth-role`         | `barman-plugin-metrics-auth-role`         |
| `metrics-auth-rolebinding`  | `barman-plugin-metrics-auth-rolebinding`  |
| `metrics-reader`            | `barman-plugin-metrics-reader`            |
| `objectstore-viewer-role`   | `barman-plugin-objectstore-viewer-role`   |
| `objectstore-editor-role`   | `barman-plugin-objectstore-editor-role`   |

### Namespace-scoped Resources

| Old Name                      | New Name                                    | Namespace     |
|-------------------------------|---------------------------------------------|---------------|
| `leader-election-role`        | `barman-plugin-leader-election-role`        | `cnpg-system` |
| `leader-election-rolebinding` | `barman-plugin-leader-election-rolebinding` | `cnpg-system` |

## Why This Change?

Using generic names for cluster-wide resources is discouraged, as they may
conflict with other components deployed in the same cluster. The new names make
it clear that these resources belong to the Barman Cloud plugin and help avoid
naming collisions.

## Migration Instructions

This three-step migration process is straightforward and can be completed with
a few `kubectl` commands.

### Step 1: Upgrade plugin-barman-cloud

Please refer to the [Installation](installation.mdx) section to deploy the new
`plugin-barman-cloud` release.

### Step 2: Delete Old Cluster-scoped Resources

:::danger Verify Resources Before Deletion
**IMPORTANT**: The old resource names are generic and could potentially belong
to other components in your cluster.

**Before deleting each resource, verify it belongs to the Barman Cloud plugin
by checking:**

- For `objectstore-*` roles: Look for `barmancloud.cnpg.io` in the API groups
- For `metrics-*` roles: Check if they reference the `plugin-barman-cloud`
  ServiceAccount in the `cnpg-system` namespace
- For other roles: Look for labels like `app.kubernetes.io/name: plugin-barman-cloud`

If a resource doesn't have these indicators, **DO NOT DELETE IT**, as it may
belong to another application.

Carefully review the output of each verification command before proceeding with
the `delete`.
:::

:::tip Dry Run First
You can add `--dry-run=client` to any `kubectl delete` command to preview what
would be deleted without actually removing anything.
:::

**Only proceed if you've verified these resources belong to the Barman Cloud
plugin (see the warning above).**

For each resource below, first verify it belongs to Barman Cloud, then delete
it:

```bash
# 1. Check metrics-auth-rolebinding FIRST (we'll check the role after)
# Look for references to the plugin-barman-cloud ServiceAccount
kubectl describe clusterrolebinding metrics-auth-rolebinding
# If it references the plugin-barman-cloud ServiceAccount in the cnpg-system
# namespace, delete it:
kubectl delete clusterrolebinding metrics-auth-rolebinding

# 2. Check metrics-auth-role
# Look for references to authentication.k8s.io and authorization.k8s.io
kubectl describe clusterrole metrics-auth-role
# Verify it's not being used by any other rolebindings:
kubectl get clusterrolebinding -o json \
  | jq -r '.items[] | select(.roleRef.name=="metrics-auth-role") | .metadata.name'
# If the above returns nothing (the role is not in use) and the role looks like
# the Barman Cloud one, delete it (see the warnings above):
kubectl delete clusterrole metrics-auth-role

# 3. Check objectstore-viewer-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-viewer-role
# If it shows barmancloud.cnpg.io in the API groups, delete it:
kubectl delete clusterrole objectstore-viewer-role

# 4. Check objectstore-editor-role
# Look for the barmancloud.cnpg.io API group or
# for the `app.kubernetes.io/name: plugin-barman-cloud` label
kubectl describe clusterrole objectstore-editor-role
# If it shows barmancloud.cnpg.io in the API groups, delete it:
kubectl delete clusterrole objectstore-editor-role

# 5. Check metrics-reader (MOST DANGEROUS - very generic name)
# First, check if it's being used by any rolebindings OTHER than Barman's:
kubectl get clusterrolebinding -o json | jq -r '.items[]
  | select(.roleRef.name=="metrics-reader")
  | "\(.metadata.name) -> \(.subjects[0].name) in \(.subjects[0].namespace)"'
# If this shows ANY rolebindings, review them carefully. Only proceed if
# they're all Barman-related. Then check the role itself:
kubectl describe clusterrole metrics-reader
# If it ONLY has nonResourceURLs: /metrics and NO other rolebindings use it,
# delete it:
kubectl delete clusterrole metrics-reader
```

:::warning
The `metrics-reader` role is particularly dangerous to delete blindly. Many
monitoring systems use this exact name. Only delete it if:

1. You've verified it ONLY grants access to `/metrics`
2. No other rolebindings reference it (checked with the jq command above)
3. You're certain it was created by the Barman Cloud plugin

If you're unsure, it's safer to leave it and let the new
`barman-plugin-metrics-reader` role coexist with it.
:::

If any resource is not found during the `describe` command, that's okay: it
means it was never created or was already deleted. Simply skip the delete
command for that resource.

### Step 3: Delete Old Namespace-scoped Resources

Delete the old namespace-scoped resources in the `cnpg-system` namespace:

```bash
# Delete the old leader-election resources
kubectl delete role leader-election-role -n cnpg-system
kubectl delete rolebinding leader-election-rolebinding -n cnpg-system
```

If any resource is not found, that's okay: it means it was never created or was
already deleted.

## Impact

- **Permissions:** If you have custom RBAC rules or tools that reference the
  old resource names, they will need to be updated.
- **External Users:** If end users have been granted the
  `objectstore-viewer-role` or `objectstore-editor-role`, they will need to be
  re-granted the new role names (`barman-plugin-objectstore-viewer-role` and
  `barman-plugin-objectstore-editor-role`), as sketched below.
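
For example, to re-grant read-only access to `ObjectStore` resources to an end
user (the binding name and user are placeholders):

```sh
# Bind a user to the new prefixed viewer role.
kubectl create clusterrolebinding objectstore-viewers \
  --clusterrole=barman-plugin-objectstore-viewer-role \
  --user=<user-name>
```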

## Verification

After migration, verify that the new resources are created:

```bash
# Check cluster-scoped resources
kubectl get clusterrole | grep barman
kubectl get clusterrolebinding | grep barman

# Check namespace-scoped resources
kubectl get role,rolebinding -n cnpg-system | grep barman
```

You should see the new prefixed resource names.

## Troubleshooting

### Plugin Not Starting After Migration

If the plugin fails to start after migration, check:

1. **ServiceAccount permissions:** Ensure the `plugin-barman-cloud` ServiceAccount is bound to the new roles:

   ```bash
   kubectl get clusterrolebinding barman-plugin-metrics-auth-rolebinding -o yaml
   kubectl get rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system -o yaml
   ```

2. **Role references:** Verify that the rolebindings reference the correct role names:

   ```bash
   kubectl describe rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system
   kubectl describe clusterrolebinding barman-plugin-metrics-auth-rolebinding
   ```

## Support

If you encounter issues during migration, please open an issue on the [GitHub
repository](https://github.com/cloudnative-pg/plugin-barman-cloud/issues).

38
web/versioned_docs/version-0.9.0/retention.md
Normal file
@ -0,0 +1,38 @@
---
sidebar_position: 60
---

# Retention Policies

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

The Barman Cloud Plugin supports **automated cleanup of obsolete backups** via
retention policies, configured in the `.spec.retentionPolicy` field of the
`ObjectStore` resource.

:::note
This feature uses the `barman-cloud-backup-delete` command with the
`--retention-policy "RECOVERY WINDOW OF {{ value }} {{ unit }}"` syntax.
:::

### Example: 30-Day Retention Policy

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  [...]
  retentionPolicy: "30d"
```
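
For instance, `retentionPolicy: "30d"` translates into an invocation roughly
equivalent to the following sketch (the actual arguments are assembled by the
plugin; destination and server name are placeholders):

```sh
# Delete backups that fall outside a 30-day recovery window.
barman-cloud-backup-delete \
  --retention-policy "RECOVERY WINDOW OF 30 DAYS" \
  s3://BUCKET_NAME/ <server-name>
```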

:::note
A **recovery window retention policy** ensures the cluster can be restored to
any point in time between the calculated *Point of Recoverability* (PoR) and
the latest WAL archive. The PoR is defined as `current time - recovery window`.
The **first valid backup** is the most recent backup completed before the PoR.
Backups older than that are marked as *obsolete* and deleted after the next
backup completes.
:::

580
web/versioned_docs/version-0.9.0/troubleshooting.md
Normal file
@ -0,0 +1,580 @@
---
sidebar_position: 90
---

# Troubleshooting

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

This guide helps you diagnose and resolve common issues with the Barman Cloud
plugin.

:::important
We are continuously improving the integration between CloudNativePG and the
Barman Cloud plugin as it moves toward greater stability and maturity. For this
reason, we recommend using the latest available version of both components.
See the [*Requirements* section](intro.md#requirements) for details.
:::

:::note
The following commands assume you installed the CloudNativePG operator in
the default `cnpg-system` namespace. If you installed it in a different
namespace, adjust the commands accordingly.
:::

## Viewing Logs

To troubleshoot effectively, you’ll often need to review logs from multiple
sources:

```sh
# View operator logs (includes plugin interaction logs)
kubectl logs -n cnpg-system deployment/cnpg-controller-manager -f

# View plugin manager logs
kubectl logs -n cnpg-system deployment/barman-cloud -f

# View sidecar container logs (Barman Cloud operations)
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud -f

# View all containers in a pod
kubectl logs -n <namespace> <cluster-pod-name> --all-containers=true

# View previous container logs (if the container restarted)
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud --previous
```

## Common Issues

### Plugin Installation Issues

#### Plugin pods not starting

**Symptoms:**

- Plugin pods stuck in `CrashLoopBackOff` or `Error`
- Plugin deployment not ready

**Possible causes and solutions:**

1. **Certificate issues**

   ```sh
   # Check if cert-manager is installed and running
   kubectl get pods -n cert-manager

   # Check if the plugin certificate is created
   kubectl get certificates -n cnpg-system
   ```

   If cert-manager is not installed, install it first:

   ```sh
   # Note: other installation methods for cert-manager are available
   kubectl apply -f \
     https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
   ```

   If you are using your own certificates without cert-manager, you will need
   to verify the entire certificate chain yourself.

2. **Image pull errors**

   ```sh
   # Check pod events for image pull errors
   kubectl describe pod -n cnpg-system -l app=barman-cloud
   ```

   Verify the image exists and that you have proper credentials if using a
   private registry.

3. **Resource constraints**

   ```sh
   # Check node resources
   kubectl top nodes
   kubectl describe nodes
   ```

   Make sure your cluster has sufficient CPU and memory resources.

### Backup Failures

#### Quick Backup Troubleshooting Checklist

When a backup fails, follow these steps in order:

1. **Check backup status**:

   ```sh
   kubectl get backups.postgresql.cnpg.io -n <namespace>
   ```

2. **Get error details and the target pod**:

   ```sh
   kubectl describe backups.postgresql.cnpg.io \
     -n <namespace> <backup-name>

   kubectl get backups.postgresql.cnpg.io \
     -n <namespace> <backup-name> \
     -o jsonpath='{.status.instanceID.podName}'
   ```

3. **Check the target pod’s sidecar logs**:

   ```sh
   TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
     -n <namespace> <backup-name> \
     -o jsonpath='{.status.instanceID.podName}')

   kubectl logs \
     -n <namespace> $TARGET_POD -c plugin-barman-cloud \
     --tail=100 | grep -E "ERROR|FATAL|panic"
   ```

4. **Check cluster events**:

   ```sh
   kubectl get events -n <namespace> \
     --field-selector involvedObject.name=<cluster-name> \
     --sort-by='.lastTimestamp'
   ```

5. **Verify the plugin is running**:

   ```sh
   kubectl get pods \
     -n cnpg-system -l app=barman-cloud
   ```

6. **Check operator logs**:

   ```sh
   kubectl logs \
     -n cnpg-system deployment/cnpg-controller-manager \
     --tail=100 | grep -i "backup\|plugin"
   ```

7. **Check plugin manager logs**:

   ```sh
   kubectl logs \
     -n cnpg-system deployment/barman-cloud --tail=100
   ```

#### Backup job fails immediately

**Symptoms:**

- Backup pods terminate with an error
- No backup files appear in object storage
- The backup shows a `failed` phase with various error messages

**Common failure modes and solutions:**

1. **"requested plugin is not available" errors**

   ```
   requested plugin is not available: barman
   requested plugin is not available: barman-cloud
   requested plugin is not available: barman-cloud.cloudnative-pg.io
   ```

   **Cause:** The plugin name in the Cluster configuration doesn’t match the
   deployed plugin, or the plugin isn’t registered.

   **Solution:**

   a. **Check plugin registration:**

      ```sh
      # If you have the `cnpg` plugin installed (v1.27.0+)
      kubectl cnpg status -n <namespace> <cluster-name>
      ```

      Look for the "Plugins status" section:

      ```
      Plugins status
      Name                            Version  Status  Reported Operator Capabilities
      ----                            -------  ------  ------------------------------
      barman-cloud.cloudnative-pg.io  0.6.0    N/A     Reconciler Hooks, Lifecycle Service
      ```

   b. **Verify the plugin name in the `Cluster` spec**:

      ```yaml
      apiVersion: postgresql.cnpg.io/v1
      kind: Cluster
      spec:
        plugins:
          - name: barman-cloud.cloudnative-pg.io
            parameters:
              barmanObjectName: <your-objectstore-name>
      ```

   c. **Check that the plugin deployment is running**:

      ```sh
      kubectl get deployment -n cnpg-system barman-cloud
      ```

2. **"rpc error: code = Unknown desc = panic caught: assignment to entry in nil map" errors**

   **Cause:** Misconfiguration in the `ObjectStore` (e.g., a typo or missing field).

   **Solution:**

   - Review the sidecar logs for details
   - Verify the `ObjectStore` configuration and secrets
   - Common issues include:
     - Missing or incorrect secret references
     - Typos in configuration parameters
     - Missing required environment variables in secrets

#### Backup performance issues

**Symptoms:**

- Backups take extremely long
- Backups time out

**Plugin-specific considerations:**

1. **Check `ObjectStore` parallelism settings**
   - Adjust `maxParallel` in the `ObjectStore` configuration
   - Monitor sidecar container resource usage during backups

2. **Verify plugin resource allocation**
   - Check if the sidecar container has sufficient CPU/memory
   - Review plugin container logs for resource-related warnings

:::tip
For Barman-specific features like compression, encryption, and performance
tuning, refer to the [Barman documentation](https://docs.pgbarman.org/latest/).
:::

### WAL Archiving Issues

#### WAL archiving stops

**Symptoms:**

- WAL files accumulate on the primary
- The cluster shows WAL archiving warnings
- Sidecar logs show WAL errors

**Debugging steps:**

1. **Check the plugin sidecar logs for WAL archiving errors**

   ```sh
   # Check recent WAL archive operations in the sidecar
   kubectl logs -n <namespace> <primary-pod> -c plugin-barman-cloud \
     --tail=50 | grep -i wal
   ```

2. **Check the ObjectStore configuration for WAL settings**
   - Ensure the ObjectStore has proper WAL retention settings
   - Verify credentials have permissions for WAL operations

### Restore Issues

#### Restore fails during recovery

**Symptoms:**

- New cluster stuck in recovery
- Plugin sidecar shows restore errors
- PostgreSQL won’t start

**Debugging steps:**

1. **Check the plugin sidecar logs during restore**

   ```sh
   # Check the sidecar logs on the recovering cluster pods
   kubectl logs -n <namespace> <cluster-pod-name> \
     -c plugin-barman-cloud --tail=100

   # Look for restore-related errors
   kubectl logs -n <namespace> <cluster-pod-name> \
     -c plugin-barman-cloud | grep -E "restore|recovery|ERROR"
   ```

2. **Verify the plugin can access backups**

   ```sh
   # Check if the `ObjectStore` is properly configured for restore
   kubectl get objectstores.barmancloud.cnpg.io \
     -n <namespace> <objectstore-name> -o yaml

   # Check PostgreSQL recovery logs
   kubectl logs -n <namespace> <cluster-pod> \
     -c postgres | grep -i recovery
   ```

:::tip
For detailed Barman restore operations and troubleshooting, refer to the
[Barman documentation](https://docs.pgbarman.org/latest/barman-cloud-restore.html).
:::

#### Point-in-time recovery (PITR) configuration issues

**Symptoms:**

- PITR doesn’t reach the target time
- WAL access errors
- Recovery halts early

**Debugging steps:**

1. **Verify the PITR configuration in the `Cluster` spec**

   ```yaml
   apiVersion: postgresql.cnpg.io/v1
   kind: Cluster
   metadata:
     name: <cluster-restore-name>
   spec:
     storage:
       size: 1Gi

     bootstrap:
       recovery:
         source: origin
         recoveryTarget:
           targetTime: "2024-01-15 10:30:00"

     externalClusters:
       - name: origin
         plugin:
           enabled: true
           name: barman-cloud.cloudnative-pg.io
           parameters:
             barmanObjectName: <object-store-name>
             serverName: <source-cluster-name>
   ```

2. **Check the sidecar logs for WAL-related errors**

   ```sh
   kubectl logs -n <namespace> <cluster-pod> \
     -c plugin-barman-cloud | grep -i wal
   ```

:::note
For detailed PITR configuration and WAL management, see the
[Barman PITR documentation](https://docs.pgbarman.org/latest/).
:::

### Plugin Configuration Issues

#### Plugin cannot connect to object storage

**Symptoms:**

- Sidecar logs show connection errors
- Backups fail with authentication or network errors
- The `ObjectStore` resource reports errors

**Solution:**

1. **Verify the `ObjectStore` CRD configuration and secrets**

   ```sh
   # Check the ObjectStore resource status
   kubectl get objectstores.barmancloud.cnpg.io \
     -n <namespace> <objectstore-name> -o yaml

   # Verify the secret exists and has the correct keys for your provider
   kubectl get secret -n <namespace> <secret-name> \
     -o jsonpath='{.data}' | jq 'keys'
   ```

2. **Check the sidecar logs for connectivity issues**

   ```sh
   kubectl logs -n <namespace> <cluster-pod> \
     -c plugin-barman-cloud | grep -E "connect|timeout|SSL|cert"
   ```

3. **Adjust provider-specific settings (endpoint, path style, etc.)**
   - See [Object Store Configuration](object_stores.md) for provider-specific settings
   - Ensure `endpointURL` and `s3UsePathStyle` match your storage type
   - Verify network policies allow egress to your storage provider

## Diagnostic Commands

### Using the `cnpg` plugin for `kubectl`

The `cnpg` plugin for `kubectl` provides extended debugging capabilities.
Keep it updated:

```sh
# Install or update the `cnpg` plugin
kubectl krew install cnpg
# Or use an alternative method: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install

# Check plugin status (requires CNPG 1.27.0+)
kubectl cnpg status <cluster-name> -n <namespace>

# View cluster status in detail
kubectl cnpg status <cluster-name> -n <namespace> --verbose
```

## Getting Help

If problems persist:

1. **Check the documentation**

   - [Installation Guide](installation.mdx)
   - [Object Store Configuration](object_stores.md) (for provider-specific settings)
   - [Usage Examples](usage.md)

2. **Gather diagnostic information**

   ```sh
   # Create a diagnostic bundle (⚠️ sanitize these before sharing!)
   kubectl get objectstores.barmancloud.cnpg.io -A -o yaml > /tmp/objectstores.yaml
   kubectl get clusters.postgresql.cnpg.io -A -o yaml > /tmp/clusters.yaml
   kubectl logs -n cnpg-system deployment/barman-cloud --tail=1000 > /tmp/plugin.log
   ```

3. **Community support**

   - CloudNativePG Slack: [#cloudnativepg-users](https://cloud-native.slack.com/messages/cloudnativepg-users)
   - GitHub Issues: [plugin-barman-cloud](https://github.com/cloudnative-pg/plugin-barman-cloud/issues)

4. **Include when reporting**

   - CloudNativePG version
   - Plugin version
   - Kubernetes version
   - Cloud provider and region
   - Relevant configuration (⚠️ sanitize/redact sensitive information)
   - Error messages and logs
   - Steps to reproduce

## Known Issues and Limitations

### Current Known Issues

1. **Migration compatibility**: After migrating from in-tree backup to the
   plugin, the `kubectl cnpg backup` command syntax has changed
   ([#353](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/353)):

   ```sh
   # Old command (in-tree, no longer works after migration)
   kubectl cnpg backup -n <namespace> <cluster-name> \
     --method=barmanObjectStore

   # New command (plugin-based)
   kubectl cnpg backup -n <namespace> <cluster-name> \
     --method=plugin --plugin-name=barman-cloud.cloudnative-pg.io
   ```

### Plugin Limitations

1. **Installation method**: Currently only manifest and Kustomize installation
   are supported ([#351](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/351);
   a Helm chart has been requested)

2. **Sidecar resource sharing**: The plugin sidecar container shares pod
   resources with PostgreSQL

3. **Plugin restart behavior**: Restarting the sidecar container requires
   restarting the entire PostgreSQL pod

## Recap of General Debugging Steps

### Check Backup Status and Identify the Target Instance

```sh
# List all backups and their status
kubectl get backups.postgresql.cnpg.io -n <namespace>

# Get detailed backup information, including error messages and the target instance
kubectl describe backups.postgresql.cnpg.io \
  -n <namespace> <backup-name>

# Extract the target pod name from a failed backup
kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='{.status.instanceID.podName}'

# Get more details, including the target pod, method, phase, and error
kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='Pod: {.status.instanceID.podName}{"\n"}Method: {.status.method}{"\n"}Phase: {.status.phase}{"\n"}Error: {.status.error}{"\n"}'

# Check the cluster status for backup-related information
kubectl cnpg status <cluster-name> -n <namespace> --verbose
```

### Check Sidecar Logs on the Backup Target Pod

```sh
# Identify which pod was the backup target (from the previous step)
TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
  -n <namespace> <backup-name> \
  -o jsonpath='{.status.instanceID.podName}')
echo "Backup target pod: $TARGET_POD"

# Check the sidecar logs on the specific target pod
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --tail=100

# Follow the logs in real time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud -f

# Check for specific errors in the target pod around the backup time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --since=10m | grep -E "ERROR|FATAL|panic|failed"

# Alternative: list all cluster pods and their roles
kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> \
  -o custom-columns=NAME:.metadata.name,ROLE:.metadata.labels.cnpg\\.io/instanceRole,INSTANCE:.metadata.labels.cnpg\\.io/instanceName

# Check sidecar logs on ALL cluster pods (if the target is unclear)
for pod in $(kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> -o name); do
  echo "=== Checking $pod ==="
  kubectl logs -n <namespace> $pod -c plugin-barman-cloud \
    --tail=20 | grep -i error || echo "No errors found"
done
```

### Check Events for Backup-Related Issues

```sh
# Check events for the cluster
kubectl get events -n <namespace> \
  --field-selector involvedObject.name=<cluster-name>

# Check events for failed backups
kubectl get events -n <namespace> \
  --field-selector involvedObject.kind=Backup

# Get all recent events in the namespace
kubectl get events -n <namespace> --sort-by='.lastTimestamp' | tail -20
```

### Verify the `ObjectStore` Configuration

```sh
# Check the ObjectStore resource
kubectl get objectstores.barmancloud.cnpg.io \
  -n <namespace> <objectstore-name> -o yaml

# Verify the secret exists and has the correct keys
kubectl get secret -n <namespace> <secret-name> -o yaml
# Alternatively
kubectl get secret -n <namespace> <secret-name> -o jsonpath='{.data}' | jq 'keys'
```

### Common Error Messages and Solutions

* **"AccessDenied" or "403 Forbidden"**: check cloud credentials and bucket permissions.
* **"NoSuchBucket"**: verify the bucket exists and the endpoint URL is correct.
* **"Connection timeout"**: check network connectivity and firewall rules.
* **"SSL certificate problem"**: for self-signed certificates, verify the CA bundle configuration.

16
web/versioned_docs/version-0.9.0/upgrades.mdx
Normal file
@ -0,0 +1,16 @@
---
sidebar_position: 25
---

# Upgrades

<!-- SPDX-License-Identifier: CC-BY-4.0 -->

You can upgrade the plugin simply by installing the new version. Unless
explicitly stated below or in the release notes, no special steps are required.

## Upgrading to version 0.8.x from previous versions

Version **0.8.0** introduces breaking changes to resource naming.
To complete the upgrade successfully, follow the instructions in the
["Resource name migration guide"](resource-name-migration.md).

283
web/versioned_docs/version-0.9.0/usage.md
Normal file
@ -0,0 +1,283 @@
|
||||
---
|
||||
sidebar_position: 30
|
||||
---
|
||||
|
||||
# Using the Barman Cloud Plugin
|
||||
|
||||
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||
|
||||
After [installing the plugin](installation.mdx) in the same namespace as the
|
||||
CloudNativePG operator, enabling your PostgreSQL cluster to use the Barman
|
||||
Cloud Plugin involves just a few steps:
|
||||
|
||||
- Defining the object store containing your WAL archive and base backups, using
|
||||
your preferred [provider](object_stores.md)
|
||||
- Instructing the Postgres cluster to use the Barman Cloud Plugin
|
||||
|
||||
From that moment, you’ll be able to issue on-demand backups or define a backup
|
||||
schedule, as well as rely on the object store for recovery operations.
|
||||
|
||||
The rest of this page details each step, using MinIO as object store provider.
|
||||
|
||||
## Defining the `ObjectStore`

An `ObjectStore` resource must be created for each object store used in your
PostgreSQL architecture. Here's an example configuration using MinIO:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: http://minio:9000
    s3Credentials:
      accessKeyId:
        name: minio
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip
```
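
The `s3Credentials` section above references a Kubernetes `Secret` named
`minio`. A minimal sketch of creating it, with placeholder credential values:

```sh
# The key names must match those referenced by the ObjectStore
# (ACCESS_KEY_ID and ACCESS_SECRET_KEY); the values are placeholders.
kubectl create secret generic minio -n <namespace> \
  --from-literal=ACCESS_KEY_ID=<access-key-id> \
  --from-literal=ACCESS_SECRET_KEY=<secret-access-key>
```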

The `.spec.configuration` schema follows the same format as the
[in-tree barman-cloud support](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration).
Refer to [the CloudNativePG documentation](https://cloudnative-pg.io/documentation/preview/backup_barmanobjectstore/)
for additional details.

:::important
The `serverName` parameter in the `ObjectStore` resource is retained solely for
API compatibility with the in-tree `barmanObjectStore` and must always be left
empty. When needed, use the `serverName` plugin parameter in the `Cluster`
configuration instead.
:::

## Configuring WAL Archiving

Once the `ObjectStore` is defined, you can configure your PostgreSQL cluster
to archive WALs by referencing the store in the `.spec.plugins` section:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imagePullPolicy: Always
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: minio-store
  storage:
    size: 1Gi
```

This configuration enables both WAL archiving and data directory backups.
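
Once the cluster is running, you can check that WAL archiving is healthy. A
quick sketch, assuming the `ContinuousArchiving` condition that CloudNativePG
exposes on the cluster status:

```sh
# "True" indicates that continuous WAL archiving is working
kubectl get cluster -n <namespace> cluster-example \
  -o jsonpath='{.status.conditions[?(@.type=="ContinuousArchiving")].status}'
```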

## Performing a Base Backup

Once WAL archiving is enabled, the cluster is ready for backups. Backups can be
created either declaratively (with YAML manifests) or imperatively (with the
`cnpg` plugin).

### Declarative approach (YAML manifest)

Create a backup resource by applying a YAML manifest:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```
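
Assuming the manifest above is saved as `backup-example.yaml` (a hypothetical
filename), you can apply it and follow its progress:

```sh
kubectl apply -n <namespace> -f backup-example.yaml
# The phase moves to "completed" once the base backup reaches the object store
kubectl get backup -n <namespace> backup-example \
  -o jsonpath='{.status.phase}'
```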

### Imperative approach (using the `cnpg` plugin)

The quickest way to trigger an on-demand backup is with the `cnpg` plugin:

```bash
kubectl cnpg backup -n <namespace> <cluster-name> \
  --method=plugin \
  --plugin-name=barman-cloud.cloudnative-pg.io
```

:::note Migration from in-tree backups
If you are migrating from the in-tree backup system, note the change in syntax:

```bash
# Old command (in-tree backup)
kubectl cnpg backup -n <namespace> <cluster-name> --method=barmanObjectStore

# New command (plugin-based backup)
kubectl cnpg backup -n <namespace> <cluster-name> \
  --method=plugin \
  --plugin-name=barman-cloud.cloudnative-pg.io
```
:::
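
Before moving on to a restore, you can confirm that a completed backup is
actually available. One way, assuming the `firstRecoverabilityPoint` status
field that CloudNativePG populates once a base backup completes:

```sh
# Empty output means no completed base backup is available yet
kubectl get cluster -n <namespace> cluster-example \
  -o jsonpath='{.status.firstRecoverabilityPoint}'
```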

## Restoring a Cluster

To restore a cluster from an object store, create a new `Cluster` resource that
references the store containing the backup. Below is an example configuration:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  imagePullPolicy: IfNotPresent
  bootstrap:
    recovery:
      source: source
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store
          serverName: cluster-example
  storage:
    size: 1Gi
```

:::important
The above configuration does **not** enable WAL archiving for the restored cluster.
:::

To enable WAL archiving for the restored cluster, include the `.spec.plugins`
section alongside the `externalClusters.plugin` section, as shown below:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  imagePullPolicy: IfNotPresent
  bootstrap:
    recovery:
      source: source
  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        # Backup Object Store (push, read-write)
        barmanObjectName: minio-store-bis
  externalClusters:
    - name: source
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          # Recovery Object Store (pull, read-only)
          barmanObjectName: minio-store
          serverName: cluster-example
  storage:
    size: 1Gi
```

The same object store may be used for both transaction log archiving and
restoring a cluster, or you can configure separate stores for these purposes.

## Configuring Replica Clusters

You can set up a distributed topology by combining the previously defined
configurations with the `.spec.replica` section. Below is an example of how to
define a replica cluster:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-dc-a
spec:
  instances: 3
  primaryUpdateStrategy: unsupervised

  storage:
    storageClass: csi-hostpath-sc
    size: 1Gi

  plugins:
    - name: barman-cloud.cloudnative-pg.io
      isWALArchiver: true
      parameters:
        barmanObjectName: minio-store-a

  replica:
    self: cluster-dc-a
    primary: cluster-dc-a
    source: cluster-dc-b

  externalClusters:
    - name: cluster-dc-a
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store-a

    - name: cluster-dc-b
      plugin:
        name: barman-cloud.cloudnative-pg.io
        parameters:
          barmanObjectName: minio-store-b
```
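
To inspect the resulting topology, the `cnpg` plugin's status command is handy;
a quick sketch, assuming the plugin is installed:

```sh
# Shows whether the cluster acts as primary or replica, plus replication status
kubectl cnpg status -n <namespace> cluster-dc-a
```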

## Configuring the plugin instance sidecar

The Barman Cloud Plugin runs as a sidecar container next to each PostgreSQL
instance pod. It manages backup, WAL archiving, and restore processes.

Configuration comes from multiple `ObjectStore` resources:

1. The one referenced in the `.spec.plugins` section of the `Cluster`. This is
   the object store used for WAL archiving and base backups.
2. The one referenced in the external cluster used in the
   `.spec.replica.source` section of the `Cluster`. This is used by the
   log-shipping designated primary to get the WAL files.
3. The one referenced in the `.spec.bootstrap.recovery.source` section of the
   `Cluster`. This is used by the initial recovery job to create the cluster
   from an existing backup.

You can fine-tune sidecar behavior in the `.spec.instanceSidecarConfiguration`
of your `ObjectStore`. These settings apply to all PostgreSQL instances that
use this object store. Any updates take effect at the next `Cluster`
reconciliation and may trigger a rollout of the `Cluster`.

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    # [...]
  instanceSidecarConfiguration:
    retentionPolicyIntervalSeconds: 1800
    resources:
      requests:
        memory: "XXX"
        cpu: "YYY"
      limits:
        memory: "XXX"
        cpu: "YYY"
```

:::note
If more than one `ObjectStore` applies, the `instanceSidecarConfiguration` of
the one set in `.spec.plugins` has priority.
:::
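
Since a change to `instanceSidecarConfiguration` may roll out new pods, you can
watch the instances as they are recreated; a minimal sketch:

```sh
# Watch the cluster's pods being recreated after the ObjectStore update
kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> -w
```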
8
web/versioned_sidebars/version-0.8.0-sidebars.json
Normal file
@ -0,0 +1,8 @@
{
  "docs": [
    {
      "type": "autogenerated",
      "dirName": "."
    }
  ]
}
8
web/versioned_sidebars/version-0.9.0-sidebars.json
Normal file
@ -0,0 +1,8 @@
{
  "docs": [
    {
      "type": "autogenerated",
      "dirName": "."
    }
  ]
}
@ -1,4 +1,6 @@
[
  "0.9.0",
  "0.8.0",
  "0.7.0",
  "0.6.0",
  "0.5.0",
1231
web/yarn.lock
File diff suppressed because it is too large