mirror of
https://github.com/cloudnative-pg/plugin-barman-cloud.git
synced 2026-01-11 21:23:12 +01:00
Compare commits
11 Commits
550745c66b
...
996bf47a43
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
996bf47a43 | ||
|
|
afa39ba786 | ||
|
|
b7b2b5b787 | ||
|
|
1f1d30043b | ||
|
|
db96dcc128 | ||
|
|
2cb76ef622 | ||
|
|
cdb893a70d | ||
|
|
6d2d3eb925 | ||
|
|
80dfe727ed | ||
|
|
c811749c32 | ||
|
|
1c3dd8558f |
@ -15,6 +15,7 @@ RUN go mod download
|
|||||||
COPY ../cmd/manager/main.go cmd/manager/main.go
|
COPY ../cmd/manager/main.go cmd/manager/main.go
|
||||||
COPY ../api/ api/
|
COPY ../api/ api/
|
||||||
COPY ../internal/ internal/
|
COPY ../internal/ internal/
|
||||||
|
COPY ../pkg/ pkg/
|
||||||
|
|
||||||
ENV GOCACHE=/root/.cache/go-build
|
ENV GOCACHE=/root/.cache/go-build
|
||||||
ENV GOMODCACHE=/go/pkg/mod
|
ENV GOMODCACHE=/go/pkg/mod
|
||||||
|
|||||||
@ -24,6 +24,7 @@ ENV GOMODCACHE=/go/pkg/mod
|
|||||||
COPY ../cmd/manager/main.go cmd/manager/main.go
|
COPY ../cmd/manager/main.go cmd/manager/main.go
|
||||||
COPY ../api/ api/
|
COPY ../api/ api/
|
||||||
COPY ../internal/ internal/
|
COPY ../internal/ internal/
|
||||||
|
COPY ../pkg/ pkg/
|
||||||
|
|
||||||
# Build
|
# Build
|
||||||
# the GOARCH has not a default value to allow the binary be built according to the host where the command
|
# the GOARCH has not a default value to allow the binary be built according to the host where the command
|
||||||
|
|||||||
68
go.mod
68
go.mod
@ -7,11 +7,11 @@ toolchain go1.25.5
|
|||||||
require (
|
require (
|
||||||
github.com/cert-manager/cert-manager v1.19.2
|
github.com/cert-manager/cert-manager v1.19.2
|
||||||
github.com/cloudnative-pg/api v1.28.0
|
github.com/cloudnative-pg/api v1.28.0
|
||||||
github.com/cloudnative-pg/barman-cloud v0.3.4-0.20251203100017-1d476f125c5b
|
github.com/cloudnative-pg/barman-cloud v0.4.0
|
||||||
github.com/cloudnative-pg/cloudnative-pg v1.28.0
|
github.com/cloudnative-pg/cloudnative-pg v1.28.0
|
||||||
github.com/cloudnative-pg/cnpg-i v0.3.0
|
github.com/cloudnative-pg/cnpg-i v0.3.1
|
||||||
github.com/cloudnative-pg/cnpg-i-machinery v0.4.1
|
github.com/cloudnative-pg/cnpg-i-machinery v0.4.2
|
||||||
github.com/cloudnative-pg/machinery v0.3.1
|
github.com/cloudnative-pg/machinery v0.3.3
|
||||||
github.com/onsi/ginkgo/v2 v2.27.3
|
github.com/onsi/ginkgo/v2 v2.27.3
|
||||||
github.com/onsi/gomega v1.38.3
|
github.com/onsi/gomega v1.38.3
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
@ -46,25 +46,25 @@ require (
|
|||||||
github.com/go-logr/logr v1.4.3 // indirect
|
github.com/go-logr/logr v1.4.3 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/go-logr/zapr v1.3.0 // indirect
|
github.com/go-logr/zapr v1.3.0 // indirect
|
||||||
github.com/go-openapi/jsonpointer v0.22.1 // indirect
|
github.com/go-openapi/jsonpointer v0.22.4 // indirect
|
||||||
github.com/go-openapi/jsonreference v0.21.2 // indirect
|
github.com/go-openapi/jsonreference v0.21.4 // indirect
|
||||||
github.com/go-openapi/swag v0.24.1 // indirect
|
github.com/go-openapi/swag v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/cmdutils v0.24.0 // indirect
|
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/conv v0.24.0 // indirect
|
github.com/go-openapi/swag/conv v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/fileutils v0.24.0 // indirect
|
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/jsonname v0.25.1 // indirect
|
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/jsonutils v0.24.0 // indirect
|
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/loading v0.24.0 // indirect
|
github.com/go-openapi/swag/loading v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/mangling v0.24.0 // indirect
|
github.com/go-openapi/swag/mangling v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/netutils v0.24.0 // indirect
|
github.com/go-openapi/swag/netutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/stringutils v0.24.0 // indirect
|
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/typeutils v0.24.0 // indirect
|
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||||
github.com/go-openapi/swag/yamlutils v0.24.0 // indirect
|
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||||
github.com/google/btree v1.1.3 // indirect
|
github.com/google/btree v1.1.3 // indirect
|
||||||
github.com/google/cel-go v0.26.0 // indirect
|
github.com/google/cel-go v0.26.0 // indirect
|
||||||
github.com/google/gnostic-models v0.7.0 // indirect
|
github.com/google/gnostic-models v0.7.1 // indirect
|
||||||
github.com/google/go-cmp v0.7.0 // indirect
|
github.com/google/go-cmp v0.7.0 // indirect
|
||||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
@ -72,11 +72,9 @@ require (
|
|||||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
|
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect
|
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect
|
||||||
github.com/lib/pq v1.10.9 // indirect
|
github.com/lib/pq v1.10.9 // indirect
|
||||||
github.com/mailru/easyjson v0.9.0 // indirect
|
|
||||||
github.com/moby/spdystream v0.5.0 // indirect
|
github.com/moby/spdystream v0.5.0 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||||
@ -88,8 +86,8 @@ require (
|
|||||||
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 // indirect
|
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 // indirect
|
||||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||||
github.com/prometheus/client_model v0.6.2 // indirect
|
github.com/prometheus/client_model v0.6.2 // indirect
|
||||||
github.com/prometheus/common v0.66.1 // indirect
|
github.com/prometheus/common v0.67.4 // indirect
|
||||||
github.com/prometheus/procfs v0.17.0 // indirect
|
github.com/prometheus/procfs v0.19.2 // indirect
|
||||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||||
github.com/snorwin/jsonpatch v1.5.0 // indirect
|
github.com/snorwin/jsonpatch v1.5.0 // indirect
|
||||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||||
@ -115,29 +113,29 @@ require (
|
|||||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
|
||||||
golang.org/x/mod v0.29.0 // indirect
|
golang.org/x/mod v0.30.0 // indirect
|
||||||
golang.org/x/net v0.47.0 // indirect
|
golang.org/x/net v0.48.0 // indirect
|
||||||
golang.org/x/oauth2 v0.32.0 // indirect
|
golang.org/x/oauth2 v0.34.0 // indirect
|
||||||
golang.org/x/sync v0.18.0 // indirect
|
golang.org/x/sync v0.19.0 // indirect
|
||||||
golang.org/x/sys v0.38.0 // indirect
|
golang.org/x/sys v0.39.0 // indirect
|
||||||
golang.org/x/term v0.37.0 // indirect
|
golang.org/x/term v0.38.0 // indirect
|
||||||
golang.org/x/text v0.31.0 // indirect
|
golang.org/x/text v0.32.0 // indirect
|
||||||
golang.org/x/time v0.13.0 // indirect
|
golang.org/x/time v0.14.0 // indirect
|
||||||
golang.org/x/tools v0.38.0 // indirect
|
golang.org/x/tools v0.39.0 // indirect
|
||||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
|
||||||
google.golang.org/protobuf v1.36.10 // indirect
|
google.golang.org/protobuf v1.36.11 // indirect
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
k8s.io/apiserver v0.35.0 // indirect
|
k8s.io/apiserver v0.35.0 // indirect
|
||||||
k8s.io/component-base v0.35.0 // indirect
|
k8s.io/component-base v0.35.0 // indirect
|
||||||
k8s.io/klog/v2 v2.130.1 // indirect
|
k8s.io/klog/v2 v2.130.1 // indirect
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect
|
||||||
sigs.k8s.io/gateway-api v1.4.0 // indirect
|
sigs.k8s.io/gateway-api v1.4.0 // indirect
|
||||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect
|
||||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
142
go.sum
142
go.sum
@ -18,16 +18,16 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
|
|||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cloudnative-pg/api v1.28.0 h1:xElzHliO0eKkVQafkfMhDJo0aIRCmB1ItEt+SGh6B58=
|
github.com/cloudnative-pg/api v1.28.0 h1:xElzHliO0eKkVQafkfMhDJo0aIRCmB1ItEt+SGh6B58=
|
||||||
github.com/cloudnative-pg/api v1.28.0/go.mod h1:puXJBOsEaJd8JLgvCtxgl2TO/ZANap/z7bPepKRUgrk=
|
github.com/cloudnative-pg/api v1.28.0/go.mod h1:puXJBOsEaJd8JLgvCtxgl2TO/ZANap/z7bPepKRUgrk=
|
||||||
github.com/cloudnative-pg/barman-cloud v0.3.4-0.20251203100017-1d476f125c5b h1:7qpnZpOkmjhs0Prasu8laSaiEQ7eC2qW1xA39mQ/aEc=
|
github.com/cloudnative-pg/barman-cloud v0.4.0 h1:V4ajM5yDWq2m+TxmnDtCBGmfMXAxbXr9k7lfR4jM+eE=
|
||||||
github.com/cloudnative-pg/barman-cloud v0.3.4-0.20251203100017-1d476f125c5b/go.mod h1:F6JqmFpa3V0/8paxu372tvxH7F6NrfUbtul3zrsoy+k=
|
github.com/cloudnative-pg/barman-cloud v0.4.0/go.mod h1:AWdyNP2jvMO1c7eOOwT8kT+QGyK5O7lEBZX12LEZ1Ic=
|
||||||
github.com/cloudnative-pg/cloudnative-pg v1.28.0 h1:vkv0a0ewDSfJOPJrsyUr4uczsxheReAWf/k171V0Dm0=
|
github.com/cloudnative-pg/cloudnative-pg v1.28.0 h1:vkv0a0ewDSfJOPJrsyUr4uczsxheReAWf/k171V0Dm0=
|
||||||
github.com/cloudnative-pg/cloudnative-pg v1.28.0/go.mod h1:209fkRR6m0vXUVQ9Q498eAPQqN2UlXECbXXtpGsZz3I=
|
github.com/cloudnative-pg/cloudnative-pg v1.28.0/go.mod h1:209fkRR6m0vXUVQ9Q498eAPQqN2UlXECbXXtpGsZz3I=
|
||||||
github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE=
|
github.com/cloudnative-pg/cnpg-i v0.3.1 h1:fKj8NoToWI11HUL2UWYJBpkVzmaTvbs3kDMo7wQF8RU=
|
||||||
github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g=
|
github.com/cloudnative-pg/cnpg-i v0.3.1/go.mod h1:glRDiJLJY51FY8ScJIv/OkaGJxFnojJkkNAqSy5XC6s=
|
||||||
github.com/cloudnative-pg/cnpg-i-machinery v0.4.1 h1:M4ELhZO0e7UjiV3PZ3uGJOz3SgG/3ThWiqkbrFCcXNE=
|
github.com/cloudnative-pg/cnpg-i-machinery v0.4.2 h1:0reS9MtyLYINHXQ/MfxJ9jp39hhBf8e3Qdj+T5Nsq6I=
|
||||||
github.com/cloudnative-pg/cnpg-i-machinery v0.4.1/go.mod h1:TxwoMgQINOJkS1kepudqCBTYd8QYev6JoT8R52pYD4E=
|
github.com/cloudnative-pg/cnpg-i-machinery v0.4.2/go.mod h1:gvrKabgxXq0zGthXGucemDdsxakLEQDMxn43M4HLW30=
|
||||||
github.com/cloudnative-pg/machinery v0.3.1 h1:KtPA6EwELTUNisCMLiFYkK83GU9606rkGQhDJGPB8Yw=
|
github.com/cloudnative-pg/machinery v0.3.3 h1:CaqXqLTJH9RrVv3R/YU0NmFaI/F18HLg2JfH3mQLcDk=
|
||||||
github.com/cloudnative-pg/machinery v0.3.1/go.mod h1:jebuqKxZAbrRKDEEpVCIDMKW+FbWtB9Kf/hb2kMUu9o=
|
github.com/cloudnative-pg/machinery v0.3.3/go.mod h1:RYAYlVKBF5pH4mg+Q8wHjNDyENV9ajbkG41zOEf8DEs=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@ -64,34 +64,40 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
|||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||||
github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
|
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
|
||||||
github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
|
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
|
||||||
github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
|
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
|
||||||
github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
|
github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
|
||||||
github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8=
|
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
|
||||||
github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A=
|
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
|
||||||
github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I=
|
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
|
||||||
github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8=
|
github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
|
||||||
github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik=
|
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
|
||||||
github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c=
|
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
|
||||||
github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak=
|
github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
|
||||||
github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90=
|
github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
|
||||||
github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
|
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
|
||||||
github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
|
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
|
||||||
github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts=
|
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
|
||||||
github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0=
|
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
|
||||||
github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc=
|
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
|
||||||
github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk=
|
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
|
||||||
github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk=
|
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
|
||||||
github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc=
|
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
|
||||||
github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w=
|
github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
|
||||||
github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM=
|
github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
|
||||||
github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM=
|
github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
|
||||||
github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w=
|
github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
|
||||||
github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw=
|
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
|
||||||
github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI=
|
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
|
||||||
github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c=
|
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
|
||||||
github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8=
|
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
|
||||||
|
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
|
||||||
|
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
|
||||||
|
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
|
||||||
|
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
|
||||||
|
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
|
||||||
|
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||||
@ -104,8 +110,8 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
|||||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
|
||||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
@ -123,8 +129,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+u
|
|||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
|
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
|
||||||
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
|
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
@ -141,8 +145,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
|
|||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
|
||||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
|
||||||
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
|
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
|
||||||
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
|
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
|
||||||
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
|
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
|
||||||
@ -178,10 +180,10 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h
|
|||||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
|
||||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
@ -267,24 +269,24 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
|||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
||||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||||
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
|
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||||
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||||
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
|
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||||
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||||
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
|
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
|
||||||
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||||
@ -295,8 +297,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:
|
|||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@ -322,8 +324,8 @@ k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94=
|
|||||||
k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
|
k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
|
||||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ=
|
||||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
||||||
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE=
|
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE=
|
||||||
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
|
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0=
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0=
|
||||||
@ -340,7 +342,7 @@ sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6
|
|||||||
sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ=
|
sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ=
|
||||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
|
||||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||||
|
|||||||
@ -20,13 +20,17 @@ SPDX-License-Identifier: Apache-2.0
|
|||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
|
barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
|
||||||
|
"github.com/cloudnative-pg/barman-cloud/pkg/command"
|
||||||
|
|
||||||
|
apiv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
|
||||||
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
|
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
|
||||||
|
pluginmetadata "github.com/cloudnative-pg/plugin-barman-cloud/pkg/metadata"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: refactor.
|
// TODO: refactor.
|
||||||
@ -97,3 +101,14 @@ func MergeEnv(env []string, incomingEnv []string) []string {
|
|||||||
func BuildCertificateFilePath(objectStoreName string) string {
|
func BuildCertificateFilePath(objectStoreName string) string {
|
||||||
return path.Join(metadata.BarmanCertificatesPath, objectStoreName, metadata.BarmanCertificatesFileName)
|
return path.Join(metadata.BarmanCertificatesPath, objectStoreName, metadata.BarmanCertificatesFileName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ContextWithProviderOptions enriches the context with cloud service provider specific options
|
||||||
|
// based on the ObjectStore resource
|
||||||
|
func ContextWithProviderOptions(ctx context.Context, objectStore apiv1.ObjectStore) context.Context {
|
||||||
|
if objectStore.GetAnnotations()[pluginmetadata.UseDefaultAzureCredentialAnnotationName] ==
|
||||||
|
pluginmetadata.UseDefaultAzureCredentialTrueValue {
|
||||||
|
return command.ContextWithDefaultAzureCredentials(ctx, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|||||||
@ -127,6 +127,8 @@ func (w WALServiceImplementation) Archive(
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx = ContextWithProviderOptions(ctx, objectStore)
|
||||||
|
|
||||||
envArchive, err := barmanCredentials.EnvSetCloudCredentialsAndCertificates(
|
envArchive, err := barmanCredentials.EnvSetCloudCredentialsAndCertificates(
|
||||||
ctx,
|
ctx,
|
||||||
w.Client,
|
w.Client,
|
||||||
|
|||||||
@ -87,6 +87,8 @@ func (b BackupServiceImplementation) Backup(
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx = common.ContextWithProviderOptions(ctx, objectStore)
|
||||||
|
|
||||||
if err := fileutils.EnsureDirectoryExists(postgres.BackupTemporaryDirectory); err != nil {
|
if err := fileutils.EnsureDirectoryExists(postgres.BackupTemporaryDirectory); err != nil {
|
||||||
contextLogger.Error(err, "Cannot create backup temporary directory", "err", err)
|
contextLogger.Error(err, "Cannot create backup temporary directory", "err", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
@ -93,6 +93,8 @@ func (c *CatalogMaintenanceRunnable) cycle(ctx context.Context) (time.Duration,
|
|||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx = common.ContextWithProviderOptions(ctx, barmanObjectStore)
|
||||||
|
|
||||||
if err := c.maintenance(ctx, &cluster, &barmanObjectStore); err != nil {
|
if err := c.maintenance(ctx, &cluster, &barmanObjectStore); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|||||||
@ -109,7 +109,7 @@ func (impl JobHookImpl) Restore(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := impl.checkBackupDestination(
|
if err := impl.checkBackupDestination(
|
||||||
ctx,
|
common.ContextWithProviderOptions(ctx, targetObjectStore),
|
||||||
configuration.Cluster,
|
configuration.Cluster,
|
||||||
&targetObjectStore.Spec.Configuration,
|
&targetObjectStore.Spec.Configuration,
|
||||||
targetObjectStore.Name,
|
targetObjectStore.Name,
|
||||||
@ -118,6 +118,8 @@ func (impl JobHookImpl) Restore(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx = common.ContextWithProviderOptions(ctx, recoveryObjectStore)
|
||||||
|
|
||||||
// Detect the backup to recover
|
// Detect the backup to recover
|
||||||
backup, env, err := loadBackupObjectFromExternalCluster(
|
backup, env, err := loadBackupObjectFromExternalCluster(
|
||||||
ctx,
|
ctx,
|
||||||
|
|||||||
2
pkg/metadata/doc.go
Normal file
2
pkg/metadata/doc.go
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
// Package metadata provides metadata utilities for the Barman Cloud plugin
|
||||||
|
package metadata
|
||||||
15
pkg/metadata/labels_annotations.go
Normal file
15
pkg/metadata/labels_annotations.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
package metadata
|
||||||
|
|
||||||
|
// MetadataNamespace is the namespace used for the Barman Cloud plugin metadata
|
||||||
|
const MetadataNamespace = "barmancloud.cnpg.io"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UseDefaultAzureCredentialAnnotationName is an annotation that can be set
|
||||||
|
// on an ObjectStore resource to enable the authentication to Azure via DefaultAzureCredential.
|
||||||
|
// This is meant to be used with inheritFromAzureAD enabled.
|
||||||
|
UseDefaultAzureCredentialAnnotationName = MetadataNamespace + "/useDefaultAzureCredential"
|
||||||
|
|
||||||
|
// UseDefaultAzureCredentialTrueValue is the value for the annotation
|
||||||
|
// barmancloud.cnpg.io/useDefaultAzureCredential to enable the DefaultAzureCredentials auth mechanism.
|
||||||
|
UseDefaultAzureCredentialTrueValue = "true"
|
||||||
|
)
|
||||||
@ -233,6 +233,7 @@ Barman Cloud supports the following authentication methods:
|
|||||||
- Storage Account Name + [Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
|
- Storage Account Name + [Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
|
||||||
- Storage Account Name + [SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
|
- Storage Account Name + [SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
|
||||||
- [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)
|
- [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)
|
||||||
|
- [DefaultAzureCredential](https://learn.microsoft.com/en-us/azure/developer/go/sdk/authentication/credential-chains#defaultazurecredential-overview)
|
||||||
|
|
||||||
### Azure AD Workload Identity
|
### Azure AD Workload Identity
|
||||||
|
|
||||||
@ -252,6 +253,27 @@ spec:
|
|||||||
[...]
|
[...]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### DefaultAzureCredential
|
||||||
|
|
||||||
|
To authenticate using `DefaultAzureCredential`, set the annotation
|
||||||
|
`barmancloud.cnpg.io/useDefaultAzureCredential="true"` on the ObjectStore in
|
||||||
|
conjunction with the `.spec.configuration.inheritFromAzureAD` option:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: azure-store
|
||||||
|
annotations:
|
||||||
|
barmancloud.cnpg.io/useDefaultAzureCredential: "true"
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "<destination path here>"
|
||||||
|
azureCredentials:
|
||||||
|
inheritFromAzureAD: true
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
### Access Key, SAS Token, or Connection String
|
### Access Key, SAS Token, or Connection String
|
||||||
|
|
||||||
Store credentials in a Kubernetes secret:
|
Store credentials in a Kubernetes secret:
|
||||||
|
|||||||
@ -339,7 +339,7 @@ For detailed Barman restore operations and troubleshooting, refer to the
|
|||||||
recovery:
|
recovery:
|
||||||
source: origin
|
source: origin
|
||||||
recoveryTarget:
|
recoveryTarget:
|
||||||
targetTime: "2024-01-15 10:30:00"
|
targetTime: "2024-01-15T10:30:00Z"
|
||||||
|
|
||||||
externalClusters:
|
externalClusters:
|
||||||
- name: origin
|
- name: origin
|
||||||
@ -358,6 +358,17 @@ For detailed Barman restore operations and troubleshooting, refer to the
|
|||||||
-c plugin-barman-cloud | grep -i wal
|
-c plugin-barman-cloud | grep -i wal
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
Timestamps without an explicit timezone suffix
|
||||||
|
(e.g., `2024-01-15 10:30:00`) are interpreted as UTC.
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
Always specify an explicit timezone in your timestamp to avoid ambiguity.
|
||||||
|
For example, use `2024-01-15T10:30:00Z` or `2024-01-15T10:30:00+02:00`
|
||||||
|
instead of `2024-01-15 10:30:00`.
|
||||||
|
:::
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
For detailed PITR configuration and WAL management, see the
|
For detailed PITR configuration and WAL management, see the
|
||||||
[Barman PITR documentation](https://docs.pgbarman.org/latest/).
|
[Barman PITR documentation](https://docs.pgbarman.org/latest/).
|
||||||
|
|||||||
43
web/versioned_docs/version-0.10.0/compression.md
Normal file
43
web/versioned_docs/version-0.10.0/compression.md
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 80
|
||||||
|
---
|
||||||
|
|
||||||
|
# Compression
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
By default, backups and WAL files are archived **uncompressed**. However, the
|
||||||
|
Barman Cloud Plugin supports multiple compression algorithms via
|
||||||
|
`barman-cloud-backup` and `barman-cloud-wal-archive`, allowing you to optimize
|
||||||
|
for space, speed, or a balance of both.
|
||||||
|
|
||||||
|
### Supported Compression Algorithms
|
||||||
|
|
||||||
|
- `bzip2`
|
||||||
|
- `gzip`
|
||||||
|
- `lz4` (WAL only)
|
||||||
|
- `snappy`
|
||||||
|
- `xz` (WAL only)
|
||||||
|
- `zstd` (WAL only)
|
||||||
|
|
||||||
|
Compression settings for base backups and WAL archives are configured
|
||||||
|
independently. For implementation details, refer to the corresponding API
|
||||||
|
definitions:
|
||||||
|
|
||||||
|
- [`DataBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration)
|
||||||
|
- [`WALBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration)
|
||||||
|
|
||||||
|
:::important
|
||||||
|
Compression impacts both performance and storage efficiency. Choose the right
|
||||||
|
algorithm based on your recovery time objectives (RTO), storage capacity, and
|
||||||
|
network throughput.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Compression Benchmark (on MinIO)
|
||||||
|
|
||||||
|
| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed Size (MB) | Compressed Size (MB) | Ratio |
|
||||||
|
| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ----- |
|
||||||
|
| None | 10,927 | 7,553 | 395 | 395 | 1.0:1 |
|
||||||
|
| bzip2 | 25,404 | 13,886 | 395 | 67 | 5.9:1 |
|
||||||
|
| gzip | 116,281 | 3,077 | 395 | 91 | 4.3:1 |
|
||||||
|
| snappy | 8,134 | 8,341 | 395 | 166 | 2.4:1 |
|
||||||
177
web/versioned_docs/version-0.10.0/concepts.md
Normal file
177
web/versioned_docs/version-0.10.0/concepts.md
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 10
|
||||||
|
---
|
||||||
|
|
||||||
|
# Main Concepts
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
:::important
|
||||||
|
Before proceeding, make sure to review the following sections of the
|
||||||
|
CloudNativePG documentation:
|
||||||
|
|
||||||
|
- [**Backup**](https://cloudnative-pg.io/documentation/current/backup/)
|
||||||
|
- [**WAL Archiving**](https://cloudnative-pg.io/documentation/current/wal_archiving/)
|
||||||
|
- [**Recovery**](https://cloudnative-pg.io/documentation/current/recovery/)
|
||||||
|
:::
|
||||||
|
|
||||||
|
The **Barman Cloud Plugin** enables **hot (online) backups** of PostgreSQL
|
||||||
|
clusters in CloudNativePG through [`barman-cloud`](https://pgbarman.org),
|
||||||
|
supporting continuous physical backups and WAL archiving to an **object
|
||||||
|
store**—without interrupting write operations.
|
||||||
|
|
||||||
|
It also supports both **full recovery** and **Point-in-Time Recovery (PITR)**
|
||||||
|
of a PostgreSQL cluster.
|
||||||
|
|
||||||
|
## The Object Store
|
||||||
|
|
||||||
|
At the core is the [`ObjectStore` custom resource (CRD)](plugin-barman-cloud.v1.md#objectstorespec),
|
||||||
|
which acts as the interface between the PostgreSQL cluster and the target
|
||||||
|
object storage system. It allows you to configure:
|
||||||
|
|
||||||
|
- **Authentication and bucket location** via the `.spec.configuration` section
|
||||||
|
- **WAL archiving** settings—such as compression type, parallelism, and
|
||||||
|
server-side encryption—under `.spec.configuration.wal`
|
||||||
|
- **Base backup options**—with similar settings for compression, concurrency,
|
||||||
|
and encryption—under `.spec.configuration.data`
|
||||||
|
- **Retention policies** to manage the life-cycle of archived WALs and backups
|
||||||
|
via `.spec.configuration.retentionPolicy`
|
||||||
|
|
||||||
|
WAL files are archived in the `wals` directory, while base backups are stored
|
||||||
|
as **tarballs** in the `base` directory, following the
|
||||||
|
[Barman Cloud convention](https://docs.pgbarman.org/cloud/latest/usage/#object-store-layout).
|
||||||
|
|
||||||
|
The plugin also offers advanced capabilities, including
|
||||||
|
[backup tagging](misc.md#backup-object-tagging) and
|
||||||
|
[extra options for backups and WAL archiving](misc.md#extra-options-for-backup-and-wal-archiving).
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For details, refer to the
|
||||||
|
[API reference for the `ObjectStore` resource](plugin-barman-cloud.v1.md#objectstorespec).
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Integration with a CloudNativePG Cluster
|
||||||
|
|
||||||
|
CloudNativePG can delegate continuous backup and recovery responsibilities to
|
||||||
|
the **Barman Cloud Plugin** by configuring the `.spec.plugins` section of a
|
||||||
|
`Cluster` resource. This setup requires a corresponding `ObjectStore` resource
|
||||||
|
to be defined.
|
||||||
|
|
||||||
|
:::important
|
||||||
|
While it is technically possible to reuse the same `ObjectStore` for multiple
|
||||||
|
`Cluster` resources within the same namespace, it is strongly recommended to
|
||||||
|
dedicate one object store per PostgreSQL cluster to ensure data isolation and
|
||||||
|
operational clarity.
|
||||||
|
:::
|
||||||
|
|
||||||
|
The following example demonstrates how to configure a CloudNativePG cluster
|
||||||
|
named `cluster-example` to use a previously defined `ObjectStore` (also named
|
||||||
|
`cluster-example`) in the same namespace. Setting `isWALArchiver: true` enables
|
||||||
|
WAL archiving through the plugin:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: cluster-example
|
||||||
|
spec:
|
||||||
|
# Other cluster settings...
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
isWALArchiver: true
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: cluster-example
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup of a Postgres Cluster
|
||||||
|
|
||||||
|
Once the object store is defined and the `Cluster` is configured to use the
|
||||||
|
Barman Cloud Plugin, **WAL archiving is activated immediately** on the
|
||||||
|
PostgreSQL primary.
|
||||||
|
|
||||||
|
Physical base backups are seamlessly managed by CloudNativePG using the
|
||||||
|
`Backup` and `ScheduledBackup` resources, respectively for
|
||||||
|
[on-demand](https://cloudnative-pg.io/documentation/current/backup/#on-demand-backups)
|
||||||
|
and
|
||||||
|
[scheduled](https://cloudnative-pg.io/documentation/current/backup/#scheduled-backups)
|
||||||
|
backups.
|
||||||
|
|
||||||
|
To use the Barman Cloud Plugin, you must set the `method` to `plugin` and
|
||||||
|
configure the `pluginConfiguration` section as shown:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
[...]
|
||||||
|
spec:
|
||||||
|
method: plugin
|
||||||
|
pluginConfiguration:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
With this configuration, CloudNativePG supports:
|
||||||
|
|
||||||
|
- Backups from both **primary** and **standby** instances
|
||||||
|
- Backups from **designated primaries** in a distributed topology using
|
||||||
|
[replica clusters](https://cloudnative-pg.io/documentation/current/replica_cluster/)
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For details on how to back up from a standby, refer to the official documentation:
|
||||||
|
[Backup from a standby](https://cloudnative-pg.io/documentation/current/backup/#backup-from-a-standby).
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::important
|
||||||
|
Both backup and WAL archiving operations are executed by sidecar containers
|
||||||
|
running in the same pod as the PostgreSQL `Cluster` primary instance—except
|
||||||
|
when backups are taken from a standby, in which case the sidecar runs alongside
|
||||||
|
the standby pod.
|
||||||
|
The sidecar containers use a [dedicated container image](images.md) that
|
||||||
|
includes only the supported version of Barman Cloud.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Recovery of a Postgres Cluster
|
||||||
|
|
||||||
|
In PostgreSQL, *recovery* refers to the process of starting a database instance
|
||||||
|
from an existing backup. The Barman Cloud Plugin integrates with CloudNativePG
|
||||||
|
to support both **full recovery** and **Point-in-Time Recovery (PITR)** from an
|
||||||
|
object store.
|
||||||
|
|
||||||
|
Recovery in this context is *not in-place*: it bootstraps a brand-new
|
||||||
|
PostgreSQL cluster from a backup and replays the necessary WAL files to reach
|
||||||
|
the desired recovery target.
|
||||||
|
|
||||||
|
To perform a recovery, define an *external cluster* that references the
|
||||||
|
appropriate `ObjectStore`, and use it as the source in the `bootstrap` section
|
||||||
|
of the target cluster:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
[...]
|
||||||
|
spec:
|
||||||
|
[...]
|
||||||
|
bootstrap:
|
||||||
|
recovery:
|
||||||
|
source: source
|
||||||
|
externalClusters:
|
||||||
|
- name: source
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: cluster-example
|
||||||
|
serverName: cluster-example
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
The critical element here is the `externalClusters` section of the `Cluster`
|
||||||
|
resource, where the `plugin` stanza instructs CloudNativePG to use the Barman
|
||||||
|
Cloud Plugin to access the object store for recovery.
|
||||||
|
|
||||||
|
This same mechanism can be used for a variety of scenarios enabled by the
|
||||||
|
CloudNativePG API, including:
|
||||||
|
|
||||||
|
* **Full cluster recovery** from the latest backup
|
||||||
|
* **Point-in-Time Recovery (PITR)**
|
||||||
|
* Bootstrapping **replica clusters** in a distributed topology
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For complete instructions and advanced use cases, refer to the official
|
||||||
|
[Recovery documentation](https://cloudnative-pg.io/documentation/current/recovery/).
|
||||||
|
:::
|
||||||
37
web/versioned_docs/version-0.10.0/images.md
Normal file
37
web/versioned_docs/version-0.10.0/images.md
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 99
|
||||||
|
---
|
||||||
|
|
||||||
|
# Container Images
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The Barman Cloud Plugin is distributed using two container images:
|
||||||
|
|
||||||
|
- One for deploying the plugin components
|
||||||
|
- One for the sidecar that runs alongside each PostgreSQL instance in a
|
||||||
|
CloudNativePG `Cluster` using the plugin
|
||||||
|
|
||||||
|
## Plugin Container Image
|
||||||
|
|
||||||
|
The plugin image contains the logic required to operate the Barman Cloud Plugin
|
||||||
|
within your Kubernetes environment with CloudNativePG. It is published on the
|
||||||
|
GitHub Container Registry at `ghcr.io/cloudnative-pg/plugin-barman-cloud`.
|
||||||
|
|
||||||
|
This image is built from the
|
||||||
|
[`Dockerfile.plugin`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.plugin)
|
||||||
|
in the plugin repository.
|
||||||
|
|
||||||
|
## Sidecar Container Image
|
||||||
|
|
||||||
|
The sidecar image is used within each PostgreSQL pod in the cluster. It
|
||||||
|
includes the latest supported version of Barman Cloud and is responsible for
|
||||||
|
performing WAL archiving and backups on behalf of CloudNativePG.
|
||||||
|
|
||||||
|
It is available at `ghcr.io/cloudnative-pg/plugin-barman-cloud-sidecar` and is
|
||||||
|
built from the
|
||||||
|
[`Dockerfile.sidecar`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.sidecar).
|
||||||
|
|
||||||
|
These sidecar images are designed to work seamlessly with the
|
||||||
|
[`minimal` PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images)
|
||||||
|
maintained by the CloudNativePG Community.
|
||||||
109
web/versioned_docs/version-0.10.0/installation.mdx
Normal file
109
web/versioned_docs/version-0.10.0/installation.mdx
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 20
|
||||||
|
---
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
|
||||||
|
:::important
|
||||||
|
1. The plugin **must** be installed in the same namespace as the CloudNativePG
|
||||||
|
operator (typically `cnpg-system`).
|
||||||
|
|
||||||
|
2. Keep in mind that the operator's **listening namespaces** may differ from its
|
||||||
|
installation namespace. Double-check this to avoid configuration issues.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Verifying the Requirements
|
||||||
|
|
||||||
|
Before installing the plugin, make sure the [requirements](intro.md#requirements) are met.
|
||||||
|
|
||||||
|
### CloudNativePG Version
|
||||||
|
|
||||||
|
Ensure you're running a version of CloudNativePG that is compatible with the
|
||||||
|
plugin. If installed in the default `cnpg-system` namespace, you can verify the
|
||||||
|
version with:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get deployment -n cnpg-system cnpg-controller-manager \
|
||||||
|
-o jsonpath="{.spec.template.spec.containers[*].image}"
|
||||||
|
```
|
||||||
|
|
||||||
|
Example output:
|
||||||
|
|
||||||
|
```output
|
||||||
|
ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0
|
||||||
|
```
|
||||||
|
|
||||||
|
The version **must be 1.26 or newer**.
|
||||||
|
|
||||||
|
### cert-manager
|
||||||
|
|
||||||
|
Use the [cmctl](https://cert-manager.io/docs/reference/cmctl/#installation)
|
||||||
|
tool to confirm that `cert-manager` is installed and available:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cmctl check api
|
||||||
|
```
|
||||||
|
|
||||||
|
Example output:
|
||||||
|
|
||||||
|
```output
|
||||||
|
The cert-manager API is ready
|
||||||
|
```
|
||||||
|
|
||||||
|
Both checks are required before proceeding with the installation.
|
||||||
|
|
||||||
|
## Installing the Barman Cloud Plugin
|
||||||
|
|
||||||
|
import { InstallationSnippet } from '@site/src/components/Installation';
|
||||||
|
|
||||||
|
Install the plugin using `kubectl` by applying the manifest for the latest
|
||||||
|
release:
|
||||||
|
|
||||||
|
<InstallationSnippet />
|
||||||
|
|
||||||
|
Example output:
|
||||||
|
|
||||||
|
```output
|
||||||
|
customresourcedefinition.apiextensions.k8s.io/objectstores.barmancloud.cnpg.io created
|
||||||
|
serviceaccount/plugin-barman-cloud created
|
||||||
|
role.rbac.authorization.k8s.io/leader-election-role created
|
||||||
|
clusterrole.rbac.authorization.k8s.io/metrics-auth-role created
|
||||||
|
clusterrole.rbac.authorization.k8s.io/metrics-reader created
|
||||||
|
clusterrole.rbac.authorization.k8s.io/objectstore-editor-role created
|
||||||
|
clusterrole.rbac.authorization.k8s.io/objectstore-viewer-role created
|
||||||
|
clusterrole.rbac.authorization.k8s.io/plugin-barman-cloud created
|
||||||
|
rolebinding.rbac.authorization.k8s.io/leader-election-rolebinding created
|
||||||
|
clusterrolebinding.rbac.authorization.k8s.io/metrics-auth-rolebinding created
|
||||||
|
clusterrolebinding.rbac.authorization.k8s.io/plugin-barman-cloud-binding created
|
||||||
|
secret/plugin-barman-cloud-8tfddg42gf created
|
||||||
|
service/barman-cloud created
|
||||||
|
deployment.apps/barman-cloud configured
|
||||||
|
certificate.cert-manager.io/barman-cloud-client created
|
||||||
|
certificate.cert-manager.io/barman-cloud-server created
|
||||||
|
issuer.cert-manager.io/selfsigned-issuer created
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, check that the deployment is up and running:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl rollout status deployment \
|
||||||
|
-n cnpg-system barman-cloud
|
||||||
|
```
|
||||||
|
|
||||||
|
Example output:
|
||||||
|
|
||||||
|
```output
|
||||||
|
deployment "barman-cloud" successfully rolled out
|
||||||
|
```
|
||||||
|
|
||||||
|
This confirms that the plugin is deployed and ready to use.
|
||||||
|
|
||||||
|
## Testing the latest development snapshot
|
||||||
|
|
||||||
|
You can also test the latest development snapshot of the plugin with the
|
||||||
|
following command:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl apply -f \
|
||||||
|
https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml
|
||||||
|
```
|
||||||
86
web/versioned_docs/version-0.10.0/intro.md
Normal file
86
web/versioned_docs/version-0.10.0/intro.md
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: "Introduction"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Barman Cloud Plugin
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The **Barman Cloud Plugin** for [CloudNativePG](https://cloudnative-pg.io/)
|
||||||
|
enables online continuous physical backups of PostgreSQL clusters to object storage
|
||||||
|
using the `barman-cloud` suite from the [Barman](https://docs.pgbarman.org/release/latest/)
|
||||||
|
project.
|
||||||
|
|
||||||
|
:::important
|
||||||
|
If you plan to migrate your existing CloudNativePG cluster to the new
|
||||||
|
plugin-based approach using the Barman Cloud Plugin, see
|
||||||
|
["Migrating from Built-in CloudNativePG Backup"](migration.md)
|
||||||
|
for detailed instructions.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
Before using the Barman Cloud Plugin, ensure that the following components are
|
||||||
|
installed and properly configured:
|
||||||
|
|
||||||
|
- [CloudNativePG](https://cloudnative-pg.io) version 1.26 or later
|
||||||
|
|
||||||
|
- We strongly recommend version 1.27.0 or later, which includes improved
|
||||||
|
error handling and status reporting for the plugin.
|
||||||
|
- If you are running an earlier release, refer to the
|
||||||
|
[upgrade guide](https://cloudnative-pg.io/documentation/current/installation_upgrade).
|
||||||
|
|
||||||
|
- [cert-manager](https://cert-manager.io/)
|
||||||
|
|
||||||
|
- The recommended way to enable secure TLS communication between the plugin
|
||||||
|
and the operator.
|
||||||
|
- Alternatively, you can provide your own certificate bundles. See the
|
||||||
|
[CloudNativePG documentation on TLS configuration](https://cloudnative-pg.io/documentation/current/cnpg_i/#configuring-tls-certificates).
|
||||||
|
|
||||||
|
- [`kubectl-cnpg`](https://cloudnative-pg.io/documentation/current/kubectl-plugin/)
|
||||||
|
plugin (optional but recommended)
|
||||||
|
|
||||||
|
- Simplifies debugging and monitoring with additional status and inspection
|
||||||
|
commands.
|
||||||
|
- Multiple installation options are available in the
|
||||||
|
[installation guide](https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install).
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
This plugin provides the following capabilities:
|
||||||
|
|
||||||
|
- Physical online backup of the data directory
|
||||||
|
- Physical restore of the data directory
|
||||||
|
- Write-Ahead Log (WAL) archiving
|
||||||
|
- WAL restore
|
||||||
|
- Full cluster recovery
|
||||||
|
- Point-in-Time Recovery (PITR)
|
||||||
|
- Seamless integration with replica clusters for bootstrap and WAL restore from archive
|
||||||
|
|
||||||
|
:::important
|
||||||
|
The Barman Cloud Plugin is designed to **replace the in-tree object storage support**
|
||||||
|
previously provided via the `.spec.backup.barmanObjectStore` section in the
|
||||||
|
`Cluster` resource.
|
||||||
|
Backups created using the in-tree approach are fully supported and compatible
|
||||||
|
with this plugin.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Supported Object Storage Providers
|
||||||
|
|
||||||
|
The plugin works with all storage backends supported by `barman-cloud`, including:
|
||||||
|
|
||||||
|
- **Amazon S3**
|
||||||
|
- **Google Cloud Storage**
|
||||||
|
- **Microsoft Azure Blob Storage**
|
||||||
|
|
||||||
|
In addition, the following S3-compatible and simulator solutions have been
|
||||||
|
tested and verified:
|
||||||
|
|
||||||
|
- [MinIO](https://min.io/) – An S3-compatible storage solution
|
||||||
|
- [Azurite](https://github.com/Azure/Azurite) – A simulator for Azure Blob Storage
|
||||||
|
- [fake-gcs-server](https://github.com/fsouza/fake-gcs-server) – A simulator for Google Cloud Storage
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For more details, refer to [Object Store Providers](object_stores.md).
|
||||||
|
:::
|
||||||
274
web/versioned_docs/version-0.10.0/migration.md
Normal file
274
web/versioned_docs/version-0.10.0/migration.md
Normal file
@ -0,0 +1,274 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 40
|
||||||
|
---
|
||||||
|
|
||||||
|
# Migrating from Built-in CloudNativePG Backup
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The in-tree support for Barman Cloud in CloudNativePG is **deprecated starting
|
||||||
|
from version 1.26** and will be removed in a future release.
|
||||||
|
|
||||||
|
If you're currently relying on the built-in Barman Cloud integration, you can
|
||||||
|
migrate seamlessly to the new **plugin-based architecture** using the Barman
|
||||||
|
Cloud Plugin, without data loss. Follow these steps:
|
||||||
|
|
||||||
|
- [Install the Barman Cloud Plugin](installation.mdx)
|
||||||
|
- Create an `ObjectStore` resource by translating the contents of the
|
||||||
|
`.spec.backup.barmanObjectStore` section from your existing `Cluster`
|
||||||
|
definition
|
||||||
|
- Modify the `Cluster` resource in a single atomic change to switch from
|
||||||
|
in-tree backup to the plugin
|
||||||
|
- Update any `ScheduledBackup` resources to use the plugin
|
||||||
|
- Update the `externalClusters` configuration, where applicable
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For a working example, refer to [this commit](https://github.com/cloudnative-pg/cnpg-playground/commit/596f30e252896edf8f734991c3538df87630f6f7)
|
||||||
|
from the [CloudNativePG Playground project](https://github.com/cloudnative-pg/cnpg-playground),
|
||||||
|
which demonstrates a full migration.
|
||||||
|
:::
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 1: Define the `ObjectStore`
|
||||||
|
|
||||||
|
Begin by creating an `ObjectStore` resource in the same namespace as your
|
||||||
|
PostgreSQL `Cluster`.
|
||||||
|
|
||||||
|
There is a **direct mapping** between the `.spec.backup.barmanObjectStore`
|
||||||
|
section in CloudNativePG and the `.spec.configuration` field in the
|
||||||
|
`ObjectStore` CR. The conversion is mostly mechanical, with one key difference:
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
In the plugin architecture, retention policies are defined as part of the `ObjectStore`.
|
||||||
|
In contrast, the in-tree implementation defined them at the `Cluster` level.
|
||||||
|
:::
|
||||||
|
|
||||||
|
If your `Cluster` used `.spec.backup.retentionPolicy`, move that configuration
|
||||||
|
to `.spec.retentionPolicy` in the `ObjectStore`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
Here’s an excerpt from a traditional in-tree CloudNativePG backup configuration
|
||||||
|
taken from the CloudNativePG Playground project:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: pg-eu
|
||||||
|
spec:
|
||||||
|
# [...]
|
||||||
|
backup:
|
||||||
|
barmanObjectStore:
|
||||||
|
destinationPath: s3://backups/
|
||||||
|
endpointURL: http://minio-eu:9000
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
wal:
|
||||||
|
compression: gzip
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration translates to the following `ObjectStore` resource for the
|
||||||
|
plugin:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-eu
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: s3://backups/
|
||||||
|
endpointURL: http://minio-eu:9000
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
wal:
|
||||||
|
compression: gzip
|
||||||
|
```
|
||||||
|
|
||||||
|
As you can see, the contents of `barmanObjectStore` have been copied directly
|
||||||
|
under the `configuration` field of the `ObjectStore` resource, using the same
|
||||||
|
secret references.
|
||||||
|
|
||||||
|
## Step 2: Update the `Cluster` for plugin WAL archiving
|
||||||
|
|
||||||
|
Once the `ObjectStore` resource is in place, update the `Cluster` resource as
|
||||||
|
follows in a single atomic change:
|
||||||
|
|
||||||
|
- Remove the `.spec.backup.barmanObjectStore` section
|
||||||
|
- Remove `.spec.backup.retentionPolicy` if it was defined (as it is now in the
|
||||||
|
`ObjectStore`)
|
||||||
|
- Remove the entire `spec.backup` section if it is now empty
|
||||||
|
- Add `barman-cloud.cloudnative-pg.io` to the `plugins` list, as described in
|
||||||
|
[Configuring WAL archiving](usage.md#configuring-wal-archiving)
|
||||||
|
|
||||||
|
This will trigger a rolling update of the `Cluster`, switching continuous
|
||||||
|
backup from the in-tree implementation to the plugin-based approach.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
The updated `pg-eu` cluster will have this configuration instead of the
|
||||||
|
previous `backup` section:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
isWALArchiver: true
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-eu
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 3: Update the `ScheduledBackup`
|
||||||
|
|
||||||
|
After switching the `Cluster` to use the plugin, update your `ScheduledBackup`
|
||||||
|
resources to match.
|
||||||
|
|
||||||
|
Set the backup `method` to `plugin` and reference the plugin name via
|
||||||
|
`pluginConfiguration`, as shown in ["Performing a base backup"](usage.md#performing-a-base-backup).
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
Original in-tree `ScheduledBackup`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: ScheduledBackup
|
||||||
|
metadata:
|
||||||
|
name: pg-eu-backup
|
||||||
|
spec:
|
||||||
|
cluster:
|
||||||
|
name: pg-eu
|
||||||
|
schedule: '0 0 0 * * *'
|
||||||
|
backupOwnerReference: self
|
||||||
|
```
|
||||||
|
|
||||||
|
Updated version using the plugin:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: ScheduledBackup
|
||||||
|
metadata:
|
||||||
|
name: pg-eu-backup
|
||||||
|
spec:
|
||||||
|
cluster:
|
||||||
|
name: pg-eu
|
||||||
|
schedule: '0 0 0 * * *'
|
||||||
|
backupOwnerReference: self
|
||||||
|
method: plugin
|
||||||
|
pluginConfiguration:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 4: Update the `externalClusters` configuration
|
||||||
|
|
||||||
|
If your `Cluster` relies on one or more external clusters that use the in-tree
|
||||||
|
Barman Cloud integration, you need to update those configurations to use the
|
||||||
|
plugin-based architecture.
|
||||||
|
|
||||||
|
When a replica cluster fetches WAL files or base backups from an external
|
||||||
|
source that used the built-in backup method, follow these steps:
|
||||||
|
|
||||||
|
1. Create a corresponding `ObjectStore` resource for the external cluster, as
|
||||||
|
shown in [Step 1](#step-1-define-the-objectstore)
|
||||||
|
2. Update the `externalClusters` section of your replica cluster to use the
|
||||||
|
plugin instead of the in-tree `barmanObjectStore` field
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
Consider the original configuration using in-tree Barman Cloud:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: pg-us
|
||||||
|
spec:
|
||||||
|
# [...]
|
||||||
|
externalClusters:
|
||||||
|
- name: pg-eu
|
||||||
|
barmanObjectStore:
|
||||||
|
destinationPath: s3://backups/
|
||||||
|
endpointURL: http://minio-eu:9000
|
||||||
|
serverName: pg-eu
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
wal:
|
||||||
|
compression: gzip
|
||||||
|
```
|
||||||
|
|
||||||
|
Create the `ObjectStore` resource for the external cluster:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-eu
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: s3://backups/
|
||||||
|
endpointURL: http://minio-eu:9000
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio-eu
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
wal:
|
||||||
|
compression: gzip
|
||||||
|
```
|
||||||
|
|
||||||
|
Update the external cluster configuration to use the plugin:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: pg-us
|
||||||
|
spec:
|
||||||
|
# [...]
|
||||||
|
externalClusters:
|
||||||
|
- name: pg-eu
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-eu
|
||||||
|
serverName: pg-eu
|
||||||
|
```
|
||||||
|
|
||||||
|
## Step 5: Verify your metrics
|
||||||
|
|
||||||
|
When migrating from the in-core solution to the plugin-based approach, you need
|
||||||
|
to monitor a different set of metrics, as described in the
|
||||||
|
["Observability"](observability.md) section.
|
||||||
|
|
||||||
|
The table below summarizes the name changes between the old in-core metrics and
|
||||||
|
the new plugin-based ones:
|
||||||
|
|
||||||
|
| Old metric name | New metric name |
|
||||||
|
| ------------------------------------------------ | ---------------------------------------------------------------- |
|
||||||
|
| `cnpg_collector_last_failed_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp` |
|
||||||
|
| `cnpg_collector_last_available_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp` |
|
||||||
|
| `cnpg_collector_first_recoverability_point` | `barman_cloud_cloudnative_pg_io_first_recoverability_point` |
|
||||||
97
web/versioned_docs/version-0.10.0/misc.md
Normal file
97
web/versioned_docs/version-0.10.0/misc.md
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 90
|
||||||
|
---
|
||||||
|
|
||||||
|
# Miscellaneous
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
## Backup Object Tagging
|
||||||
|
|
||||||
|
You can attach key-value metadata tags to backup artifacts—such as base
|
||||||
|
backups, WAL files, and history files—via the `.spec.configuration` section of
|
||||||
|
the `ObjectStore` resource.
|
||||||
|
|
||||||
|
- `tags`: applied to base backups and WAL files
|
||||||
|
- `historyTags`: applied to history files only
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: my-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
[...]
|
||||||
|
tags:
|
||||||
|
backupRetentionPolicy: "expire"
|
||||||
|
historyTags:
|
||||||
|
backupRetentionPolicy: "keep"
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Extra Options for Backup and WAL Archiving
|
||||||
|
|
||||||
|
You can pass additional command-line arguments to `barman-cloud-backup` and
|
||||||
|
`barman-cloud-wal-archive` using the `additionalCommandArgs` field in the
|
||||||
|
`ObjectStore` configuration.
|
||||||
|
|
||||||
|
- `.spec.configuration.data.additionalCommandArgs`: for `barman-cloud-backup`
|
||||||
|
- `.spec.configuration.wal.archiveAdditionalCommandArgs`: for `barman-cloud-wal-archive`
|
||||||
|
|
||||||
|
Each field accepts a list of string arguments. If an argument is already
|
||||||
|
configured elsewhere in the plugin, the duplicate will be ignored.
|
||||||
|
|
||||||
|
### Example: Extra Backup Options
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: my-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
data:
|
||||||
|
additionalCommandArgs:
|
||||||
|
- "--min-chunk-size=5MB"
|
||||||
|
- "--read-timeout=60"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Extra WAL Archive Options
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: my-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
wal:
|
||||||
|
archiveAdditionalCommandArgs:
|
||||||
|
- "--max-concurrency=1"
|
||||||
|
- "--read-timeout=60"
|
||||||
|
```
|
||||||
|
|
||||||
|
For a complete list of supported options, refer to the
|
||||||
|
[official Barman Cloud documentation](https://docs.pgbarman.org/release/latest/).
|
||||||
|
|
||||||
|
## Enable the pprof debug server for the sidecar
|
||||||
|
|
||||||
|
You can enable the instance sidecar's pprof debug HTTP server by adding the `--pprof-server=<address>` flag to the container's
|
||||||
|
arguments via `.spec.instanceSidecarConfiguration.additionalContainerArgs`.
|
||||||
|
|
||||||
|
Pass a bind address in the form `<host>:<port>` (for example, `0.0.0.0:6061`).
|
||||||
|
An empty value disables the server (disabled by default).
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: my-store
|
||||||
|
spec:
|
||||||
|
instanceSidecarConfiguration:
|
||||||
|
additionalContainerArgs:
|
||||||
|
- "--pprof-server=0.0.0.0:6061"
|
||||||
|
```
|
||||||
454
web/versioned_docs/version-0.10.0/object_stores.md
Normal file
454
web/versioned_docs/version-0.10.0/object_stores.md
Normal file
@ -0,0 +1,454 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 50
|
||||||
|
---
|
||||||
|
|
||||||
|
# Object Store Providers
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The Barman Cloud Plugin enables the storage of PostgreSQL cluster backup files
|
||||||
|
in any object storage service supported by the
|
||||||
|
[Barman Cloud infrastructure](https://docs.pgbarman.org/release/latest/).
|
||||||
|
|
||||||
|
Currently, Barman Cloud supports the following providers:
|
||||||
|
|
||||||
|
- [Amazon S3](#aws-s3)
|
||||||
|
- [Microsoft Azure Blob Storage](#azure-blob-storage)
|
||||||
|
- [Google Cloud Storage](#google-cloud-storage)
|
||||||
|
|
||||||
|
You may also use any S3- or Azure-compatible implementation of the above
|
||||||
|
services.
|
||||||
|
|
||||||
|
To configure object storage with Barman Cloud, you must define an
|
||||||
|
[`ObjectStore` object](plugin-barman-cloud.v1.md#objectstore), which
|
||||||
|
establishes the connection between your PostgreSQL cluster and the object
|
||||||
|
storage backend.
|
||||||
|
|
||||||
|
Configuration details — particularly around authentication — will vary depending on
|
||||||
|
the specific object storage provider you are using.
|
||||||
|
|
||||||
|
The following sections detail the setup for each.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## AWS S3
|
||||||
|
|
||||||
|
[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is one of the
|
||||||
|
most widely adopted object storage solutions.
|
||||||
|
|
||||||
|
The Barman Cloud plugin for CloudNativePG integrates with S3 through two
|
||||||
|
primary authentication mechanisms:
|
||||||
|
|
||||||
|
- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) —
|
||||||
|
recommended for clusters running on EKS
|
||||||
|
- Access keys — using `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials
|
||||||
|
|
||||||
|
### Access Keys
|
||||||
|
|
||||||
|
To authenticate using access keys, you’ll need:
|
||||||
|
|
||||||
|
- `ACCESS_KEY_ID`: the public key used to authenticate to S3
|
||||||
|
- `ACCESS_SECRET_KEY`: the corresponding secret key
|
||||||
|
- `ACCESS_SESSION_TOKEN`: (optional) a temporary session token, if required
|
||||||
|
|
||||||
|
These credentials must be stored securely in a Kubernetes secret:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl create secret generic aws-creds \
|
||||||
|
--from-literal=ACCESS_KEY_ID=<access key here> \
|
||||||
|
--from-literal=ACCESS_SECRET_KEY=<secret key here>
|
||||||
|
# --from-literal=ACCESS_SESSION_TOKEN=<session token here> # if required
|
||||||
|
```
|
||||||
|
|
||||||
|
The credentials will be encrypted at rest if your Kubernetes environment
|
||||||
|
supports it.
|
||||||
|
|
||||||
|
You can then reference the secret in your `ObjectStore` definition:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: aws-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "s3://BUCKET_NAME/path/to/folder"
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: aws-creds
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: aws-creds
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
### IAM Role for Service Account (IRSA)
|
||||||
|
|
||||||
|
To use IRSA with EKS, configure the service account of the PostgreSQL cluster
|
||||||
|
with the appropriate annotation:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
[...]
|
||||||
|
spec:
|
||||||
|
serviceAccountTemplate:
|
||||||
|
metadata:
|
||||||
|
annotations:
|
||||||
|
eks.amazonaws.com/role-arn: arn:[...]
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
### S3 Lifecycle Policy
|
||||||
|
|
||||||
|
Barman Cloud uploads backup files to S3 but does not modify or delete them afterward.
|
||||||
|
To enhance data durability and protect against accidental or malicious loss,
|
||||||
|
it's recommended to implement the following best practices:
|
||||||
|
|
||||||
|
- Enable object versioning
|
||||||
|
- Enable object locking to prevent objects from being deleted or overwritten
|
||||||
|
for a defined period or indefinitely (this provides an additional layer of
|
||||||
|
protection against accidental deletion and ransomware attacks)
|
||||||
|
- Set lifecycle rules to expire current versions a few days after your Barman
|
||||||
|
retention window
|
||||||
|
- Expire non-current versions after a longer period
|
||||||
|
|
||||||
|
These strategies help you safeguard backups without requiring broad delete
|
||||||
|
permissions, ensuring both security and compliance with minimal operational
|
||||||
|
overhead.
|
||||||
|
|
||||||
|
|
||||||
|
### S3-Compatible Storage Providers
|
||||||
|
|
||||||
|
You can use S3-compatible services like **MinIO**, **Linode (Akamai) Object Storage**,
|
||||||
|
or **DigitalOcean Spaces** by specifying a custom `endpointURL`.
|
||||||
|
|
||||||
|
Example with Linode (Akamai) Object Storage (`us-east1`):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: linode-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "s3://BUCKET_NAME/"
|
||||||
|
endpointURL: "https://us-east1.linodeobjects.com"
|
||||||
|
s3Credentials:
|
||||||
|
[...]
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
Recent changes to the [boto3 implementation](https://github.com/boto/boto3/issues/4392)
|
||||||
|
of [Amazon S3 Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)
|
||||||
|
may lead to the `x-amz-content-sha256` error when using the Barman Cloud
|
||||||
|
Plugin.
|
||||||
|
|
||||||
|
If you encounter this issue (see [GitHub issue #393](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/393)),
|
||||||
|
you can apply the following workaround by setting specific environment
|
||||||
|
variables in the `ObjectStore` resource:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: linode-store
|
||||||
|
spec:
|
||||||
|
instanceSidecarConfiguration:
|
||||||
|
env:
|
||||||
|
- name: AWS_REQUEST_CHECKSUM_CALCULATION
|
||||||
|
value: when_required
|
||||||
|
- name: AWS_RESPONSE_CHECKSUM_VALIDATION
|
||||||
|
value: when_required
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
These settings ensure that checksum calculations and validations are only
|
||||||
|
applied when explicitly required, avoiding compatibility issues with certain
|
||||||
|
S3-compatible storage providers.
|
||||||
|
|
||||||
|
Example with DigitalOcean Spaces (SFO3, path-style):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: digitalocean-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "s3://BUCKET_NAME/path/to/folder"
|
||||||
|
endpointURL: "https://sfo3.digitaloceanspaces.com"
|
||||||
|
s3Credentials:
|
||||||
|
[...]
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using Object Storage with a Private CA
|
||||||
|
|
||||||
|
For object storage services (e.g., MinIO) that use HTTPS with certificates
|
||||||
|
signed by a private CA, set the `endpointCA` field in the `ObjectStore`
|
||||||
|
definition. Unless you already have it, create a Kubernetes `Secret` with the
|
||||||
|
CA bundle:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl create secret generic my-ca-secret --from-file=ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
Then reference it:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
endpointURL: <myEndpointURL>
|
||||||
|
endpointCA:
|
||||||
|
name: my-ca-secret
|
||||||
|
key: ca.crt
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!-- TODO: does this also apply to the plugin? -->
|
||||||
|
:::note
|
||||||
|
If you want `ConfigMaps` and `Secrets` to be **automatically** reloaded by
|
||||||
|
instances, you can add a label with the key `cnpg.io/reload` to the
|
||||||
|
`Secrets`/`ConfigMaps`. Otherwise, you will have to reload the instances using the
|
||||||
|
`kubectl cnpg reload` subcommand.
|
||||||
|
:::
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Azure Blob Storage
|
||||||
|
|
||||||
|
[Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/)
|
||||||
|
is Microsoft’s cloud-based object storage solution.
|
||||||
|
|
||||||
|
Barman Cloud supports the following authentication methods:
|
||||||
|
|
||||||
|
- [Connection String](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string)
|
||||||
|
- Storage Account Name + [Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
|
||||||
|
- Storage Account Name + [SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
|
||||||
|
- [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)
|
||||||
|
|
||||||
|
### Azure AD Workload Identity
|
||||||
|
|
||||||
|
This method avoids storing credentials in Kubernetes via the
|
||||||
|
`.spec.configuration.azureCredentials.inheritFromAzureAD` option:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: azure-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "<destination path here>"
|
||||||
|
azureCredentials:
|
||||||
|
inheritFromAzureAD: true
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Access Key, SAS Token, or Connection String
|
||||||
|
|
||||||
|
Store credentials in a Kubernetes secret:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl create secret generic azure-creds \
|
||||||
|
--from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
|
||||||
|
--from-literal=AZURE_STORAGE_KEY=<storage account key> \
|
||||||
|
--from-literal=AZURE_STORAGE_SAS_TOKEN=<SAS token> \
|
||||||
|
--from-literal=AZURE_STORAGE_CONNECTION_STRING=<connection string>
|
||||||
|
```
|
||||||
|
|
||||||
|
Then reference the required keys in your `ObjectStore`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: azure-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "<destination path here>"
|
||||||
|
azureCredentials:
|
||||||
|
connectionString:
|
||||||
|
name: azure-creds
|
||||||
|
        key: AZURE_STORAGE_CONNECTION_STRING
|
||||||
|
storageAccount:
|
||||||
|
name: azure-creds
|
||||||
|
key: AZURE_STORAGE_ACCOUNT
|
||||||
|
storageKey:
|
||||||
|
name: azure-creds
|
||||||
|
key: AZURE_STORAGE_KEY
|
||||||
|
storageSasToken:
|
||||||
|
name: azure-creds
|
||||||
|
key: AZURE_STORAGE_SAS_TOKEN
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
For Azure Blob, the destination path format is:
|
||||||
|
|
||||||
|
```
|
||||||
|
<http|https>://<account-name>.<service-name>.core.windows.net/<container>/<blob>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Azure-Compatible Providers
|
||||||
|
|
||||||
|
If you're using a different implementation (e.g., Azurite or emulator):
|
||||||
|
|
||||||
|
```
|
||||||
|
<http|https>://<local-machine-address>:<port>/<account-name>/<container>/<blob>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Google Cloud Storage
|
||||||
|
|
||||||
|
[Google Cloud Storage](https://cloud.google.com/storage/) is supported with two
|
||||||
|
authentication modes:
|
||||||
|
|
||||||
|
- **GKE Workload Identity** (recommended inside Google Kubernetes Engine)
|
||||||
|
- **Service Account JSON key** via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable
|
||||||
|
|
||||||
|
### GKE Workload Identity
|
||||||
|
|
||||||
|
Use the [Workload Identity authentication](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
|
||||||
|
when running in GKE:
|
||||||
|
|
||||||
|
1. Set `googleCredentials.gkeEnvironment` to `true` in the `ObjectStore`
|
||||||
|
resource
|
||||||
|
2. Annotate the `serviceAccountTemplate` in the `Cluster` resource with the GCP
|
||||||
|
service account
|
||||||
|
|
||||||
|
For example, in the `ObjectStore` resource:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: google-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "gs://<bucket>/<folder>"
|
||||||
|
googleCredentials:
|
||||||
|
gkeEnvironment: true
|
||||||
|
```
|
||||||
|
|
||||||
|
And in the `Cluster` resource:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
spec:
|
||||||
|
serviceAccountTemplate:
|
||||||
|
metadata:
|
||||||
|
annotations:
|
||||||
|
iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com
|
||||||
|
```
|
||||||
|
|
||||||
|
### Service Account JSON Key
|
||||||
|
|
||||||
|
Follow Google’s [authentication setup](https://cloud.google.com/docs/authentication/getting-started),
|
||||||
|
then:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json
|
||||||
|
```
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: google-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: "gs://<bucket>/<folder>"
|
||||||
|
googleCredentials:
|
||||||
|
applicationCredentials:
|
||||||
|
name: backup-creds
|
||||||
|
key: gcsCredentials
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
:::important
|
||||||
|
This authentication method generates a JSON file within the container
|
||||||
|
with all the credentials required to access your Google Cloud Storage
|
||||||
|
bucket. As a result, if someone gains access to the `Pod`, they will also have
|
||||||
|
write permissions to the bucket.
|
||||||
|
:::
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
## MinIO Object Store
|
||||||
|
|
||||||
|
In order to use the Tenant resource you first need to deploy the
|
||||||
|
[MinIO operator](https://docs.min.io/community/minio-object-store/operations/deployments/installation.html).
|
||||||
|
For the latest documentation of MinIO, please refer to the
|
||||||
|
[MinIO official documentation](https://docs.min.io/community/minio-object-store/).
|
||||||
|
|
||||||
|
MinIO Object Store's API is compatible with S3, and the default configuration of the Tenant
|
||||||
|
will create these services:
|
||||||
|
- `<tenant>-console` on port 9443 (with autocert) or 9090 (without autocert)
|
||||||
|
- `<tenant>-hl` on port 9000
|
||||||
|
Where `<tenant>` is the `metadata.name` you assigned to your Tenant resource.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
The `<tenant>-console` service will only be available if you have enabled the
|
||||||
|
[MinIO Console](https://docs.min.io/community/minio-object-store/administration/minio-console.html).
:::
|
||||||
|
|
||||||
|
For example, the following Tenant:
|
||||||
|
```yml
|
||||||
|
apiVersion: minio.min.io/v2
|
||||||
|
kind: Tenant
|
||||||
|
metadata:
|
||||||
|
name: cnpg-backups
|
||||||
|
spec:
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
would have services called `cnpg-backups-console` and `cnpg-backups-hl` respectively.
|
||||||
|
|
||||||
|
The `console` service is for managing the tenant, while the `hl` service exposes the S3
|
||||||
|
compatible API. If your tenant is configured with `requestAutoCert` you will communicate
|
||||||
|
to these services over HTTPS, if not you will use HTTP.
|
||||||
|
|
||||||
|
For authentication you can use your username and password, or create an access key.
|
||||||
|
Whichever method you choose, it has to be stored as a secret.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl create secret generic minio-creds \
|
||||||
|
--from-literal=MINIO_ACCESS_KEY=<minio access key or username> \
|
||||||
|
--from-literal=MINIO_SECRET_KEY=<minio secret key or password>
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, create the Barman ObjectStore:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: s3://BUCKET_NAME/
|
||||||
|
endpointURL: http://<tenant>-hl:9000
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio-creds
|
||||||
|
key: MINIO_ACCESS_KEY
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio-creds
|
||||||
|
key: MINIO_SECRET_KEY
|
||||||
|
[...]
|
||||||
|
```
|
||||||
|
|
||||||
|
:::important
|
||||||
|
Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
|
||||||
|
proceeding with a backup.
|
||||||
|
:::
|
||||||
|
|
||||||
|
---
|
||||||
24
web/versioned_docs/version-0.10.0/observability.md
Normal file
24
web/versioned_docs/version-0.10.0/observability.md
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 55
|
||||||
|
---
|
||||||
|
|
||||||
|
# Observability
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The Barman Cloud Plugin exposes the following metrics through the native
|
||||||
|
Prometheus exporter of the instance manager:
|
||||||
|
|
||||||
|
- `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`:
|
||||||
|
the UNIX timestamp of the most recent failed backup.
|
||||||
|
|
||||||
|
- `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`:
|
||||||
|
the UNIX timestamp of the most recent successfully available backup.
|
||||||
|
|
||||||
|
- `barman_cloud_cloudnative_pg_io_first_recoverability_point`:
|
||||||
|
the UNIX timestamp representing the earliest point in time from which the
|
||||||
|
cluster can be recovered.
|
||||||
|
|
||||||
|
These metrics supersede the previously available in-core metrics that used the
|
||||||
|
`cnpg_collector` prefix. The new metrics are exposed under the
|
||||||
|
`barman_cloud_cloudnative_pg_io` prefix instead.
|
||||||
19
web/versioned_docs/version-0.10.0/parameters.md
Normal file
19
web/versioned_docs/version-0.10.0/parameters.md
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 100
|
||||||
|
---
|
||||||
|
|
||||||
|
# Parameters
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The following parameters are available for the Barman Cloud Plugin:
|
||||||
|
|
||||||
|
- `barmanObjectName`: references the `ObjectStore` resource to be used by the
|
||||||
|
plugin.
|
||||||
|
- `serverName`: Specifies the server name in the object store.
|
||||||
|
|
||||||
|
:::important
|
||||||
|
The `serverName` parameter in the `ObjectStore` resource is retained solely for
|
||||||
|
API compatibility with the in-tree `barmanObjectStore` and must always be left empty.
|
||||||
|
When needed, use the `serverName` plugin parameter in the Cluster configuration instead.
|
||||||
|
:::
|
||||||
108
web/versioned_docs/version-0.10.0/plugin-barman-cloud.v1.md
Normal file
108
web/versioned_docs/version-0.10.0/plugin-barman-cloud.v1.md
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
# API Reference
|
||||||
|
|
||||||
|
## Packages
|
||||||
|
- [barmancloud.cnpg.io/v1](#barmancloudcnpgiov1)
|
||||||
|
|
||||||
|
|
||||||
|
## barmancloud.cnpg.io/v1
|
||||||
|
|
||||||
|
Package v1 contains API Schema definitions for the barmancloud v1 API group
|
||||||
|
|
||||||
|
### Resource Types
|
||||||
|
- [ObjectStore](#objectstore)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#### InstanceSidecarConfiguration
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
InstanceSidecarConfiguration defines the configuration for the sidecar that runs in the instance pods.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
_Appears in:_
|
||||||
|
- [ObjectStoreSpec](#objectstorespec)
|
||||||
|
|
||||||
|
| Field | Description | Required | Default | Validation |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envvar-v1-core) array_ | The environment to be explicitly passed to the sidecar | | | |
|
||||||
|
| `retentionPolicyIntervalSeconds` _integer_ | The retentionCheckInterval defines the frequency at which the<br />system checks and enforces retention policies. | | 1800 | |
|
||||||
|
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Resources define cpu/memory requests and limits for the sidecar that runs in the instance pods. | | | |
|
||||||
|
| `additionalContainerArgs` _string array_ | AdditionalContainerArgs is an optional list of command-line arguments<br />to be passed to the sidecar container when it starts.<br />The provided arguments are appended to the container’s default arguments. | | | |
|
||||||
|
| `logLevel` _string_ | The log level for PostgreSQL instances. Valid values are: `error`, `warning`, `info` (default), `debug`, `trace` | | info | Enum: [error warning info debug trace] <br /> |
|
||||||
|
|
||||||
|
|
||||||
|
#### ObjectStore
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
ObjectStore is the Schema for the objectstores API.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
| Field | Description | Required | Default | Validation |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| `apiVersion` _string_ | `barmancloud.cnpg.io/v1` | True | | |
|
||||||
|
| `kind` _string_ | `ObjectStore` | True | | |
|
||||||
|
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | True | | |
|
||||||
|
| `spec` _[ObjectStoreSpec](#objectstorespec)_ | Specification of the desired behavior of the ObjectStore.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | True | | |
|
||||||
|
| `status` _[ObjectStoreStatus](#objectstorestatus)_ | Most recently observed status of the ObjectStore. This data may not be up to<br />date. Populated by the system. Read-only.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | |
|
||||||
|
|
||||||
|
|
||||||
|
#### ObjectStoreSpec
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
ObjectStoreSpec defines the desired state of ObjectStore.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
_Appears in:_
|
||||||
|
- [ObjectStore](#objectstore)
|
||||||
|
|
||||||
|
| Field | Description | Required | Default | Validation |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| `configuration` _[BarmanObjectStoreConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration)_ | The configuration for the barman-cloud tool suite | True | | |
|
||||||
|
| `retentionPolicy` _string_ | RetentionPolicy is the retention policy to be used for backups<br />and WALs (i.e. '60d'). The retention policy is expressed in the form<br />of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -<br />days, weeks, months. | | | Pattern: `^[1-9][0-9]*[dwm]$` <br /> |
|
||||||
|
| `instanceSidecarConfiguration` _[InstanceSidecarConfiguration](#instancesidecarconfiguration)_ | The configuration for the sidecar that runs in the instance pods | | | |
|
||||||
|
|
||||||
|
|
||||||
|
#### ObjectStoreStatus
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
ObjectStoreStatus defines the observed state of ObjectStore.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
_Appears in:_
|
||||||
|
- [ObjectStore](#objectstore)
|
||||||
|
|
||||||
|
| Field | Description | Required | Default | Validation |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| `serverRecoveryWindow` _object (keys:string, values:[RecoveryWindow](#recoverywindow))_ | ServerRecoveryWindow maps each server to its recovery window | True | | |
|
||||||
|
|
||||||
|
|
||||||
|
#### RecoveryWindow
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
RecoveryWindow represents the time span between the first
|
||||||
|
recoverability point and the last successful backup of a PostgreSQL
|
||||||
|
server, defining the period during which data can be restored.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
_Appears in:_
|
||||||
|
- [ObjectStoreStatus](#objectstorestatus)
|
||||||
|
|
||||||
|
| Field | Description | Required | Default | Validation |
|
||||||
|
| --- | --- | --- | --- | --- |
|
||||||
|
| `firstRecoverabilityPoint` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The first recoverability point in a PostgreSQL server refers to<br />the earliest point in time to which the database can be<br />restored. | True | | |
|
||||||
|
| `lastSuccessfulBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last successful backup time | True | | |
|
||||||
|
| `lastFailedBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last failed backup time | True | | |
|
||||||
|
|
||||||
|
|
||||||
219
web/versioned_docs/version-0.10.0/resource-name-migration.md
Normal file
219
web/versioned_docs/version-0.10.0/resource-name-migration.md
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 90
|
||||||
|
---
|
||||||
|
|
||||||
|
# Resource name migration guide
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
Before proceeding with the migration process, please:
|
||||||
|
1. **Read this guide in its entirety** to understand what changes will be made
|
||||||
|
2. **Test in a non-production environment** first if possible
|
||||||
|
3. **Ensure you have proper backups** of your cluster configuration
|
||||||
|
|
||||||
|
This migration will delete old RBAC resources only after the
|
||||||
|
`plugin-barman-cloud` upgrade. While the operation is designed to be safe, you
|
||||||
|
should review and understand the changes before proceeding. The maintainers of
|
||||||
|
this project are not responsible for any issues that may arise during
|
||||||
|
migration.
|
||||||
|
|
||||||
|
**Note:** This guide assumes you are using the default `cnpg-system` namespace.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Starting from version **0.8.0**, the `plugin-barman-cloud` deployment manifests
|
||||||
|
use more specific, prefixed resource names to avoid conflicts with other
|
||||||
|
components deployed in the same Kubernetes cluster.
|
||||||
|
|
||||||
|
## What Changed
|
||||||
|
|
||||||
|
The following resources have been renamed to use proper prefixes.
|
||||||
|
|
||||||
|
### Cluster-scoped Resources
|
||||||
|
|
||||||
|
| Old Name | New Name |
|
||||||
|
|----------------------------|------------------------------------------|
|
||||||
|
| `metrics-auth-role` | `barman-plugin-metrics-auth-role` |
|
||||||
|
| `metrics-auth-rolebinding` | `barman-plugin-metrics-auth-rolebinding` |
|
||||||
|
| `metrics-reader` | `barman-plugin-metrics-reader` |
|
||||||
|
| `objectstore-viewer-role` | `barman-plugin-objectstore-viewer-role` |
|
||||||
|
| `objectstore-editor-role` | `barman-plugin-objectstore-editor-role` |
|
||||||
|
|
||||||
|
### Namespace-scoped Resources
|
||||||
|
|
||||||
|
| Old Name | New Name | Namespace |
|
||||||
|
|-------------------------------|---------------------------------------------|---------------|
|
||||||
|
| `leader-election-role` | `barman-plugin-leader-election-role` | `cnpg-system` |
|
||||||
|
| `leader-election-rolebinding` | `barman-plugin-leader-election-rolebinding` | `cnpg-system` |
|
||||||
|
|
||||||
|
## Why This Change?
|
||||||
|
|
||||||
|
Using generic names for cluster-wide resources is discouraged as they may
|
||||||
|
conflict with other components deployed in the same cluster. The new names make
|
||||||
|
it clear that these resources belong to the Barman Cloud plugin and help avoid
|
||||||
|
naming collisions.
|
||||||
|
|
||||||
|
## Migration Instructions
|
||||||
|
|
||||||
|
This three-step migration process is straightforward and can be completed with
|
||||||
|
a few `kubectl` commands.
|
||||||
|
|
||||||
|
### Step 1: Upgrade plugin-barman-cloud
|
||||||
|
|
||||||
|
Please refer to the [Installation](installation.mdx) section to deploy the new
|
||||||
|
`plugin-barman-cloud` release.
|
||||||
|
|
||||||
|
### Step 2: Delete Old Cluster-scoped Resources
|
||||||
|
|
||||||
|
:::danger Verify Resources Before Deletion
|
||||||
|
**IMPORTANT**: The old resource names are generic and could potentially belong
|
||||||
|
to other components in your cluster.
|
||||||
|
|
||||||
|
**Before deleting each resource, verify it belongs to the Barman Cloud plugin
|
||||||
|
by checking:**
|
||||||
|
- For `objectstore-*` roles: Look for `barmancloud.cnpg.io` in the API groups
|
||||||
|
- For `metrics-*` roles: Check if they reference the `plugin-barman-cloud`
|
||||||
|
ServiceAccount in `cnpg-system` namespace
|
||||||
|
- For other roles: Look for labels like `app.kubernetes.io/name: plugin-barman-cloud`
|
||||||
|
|
||||||
|
If a resource doesn't have these indicators, **DO NOT DELETE IT** as it may
|
||||||
|
belong to another application.
|
||||||
|
|
||||||
|
Carefully review the output of each verification command before proceeding with
|
||||||
|
the `delete`.
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::tip Dry Run First
|
||||||
|
You can add `--dry-run=client` to any `kubectl delete` command to preview what
|
||||||
|
would be deleted without actually removing anything.
|
||||||
|
:::
|
||||||
|
|
||||||
|
**Only proceed if you've verified these resources belong to the Barman Cloud
|
||||||
|
plugin (see warning above).**
|
||||||
|
|
||||||
|
For each resource below, first verify it belongs to Barman Cloud, then delete
|
||||||
|
it:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Check metrics-auth-rolebinding FIRST (we'll check the role after)
|
||||||
|
# Look for references to plugin-barman-cloud ServiceAccount
|
||||||
|
kubectl describe clusterrolebinding metrics-auth-rolebinding
|
||||||
|
# If it references plugin-barman-cloud ServiceAccount in cnpg-system namespace,
|
||||||
|
# delete it:
|
||||||
|
kubectl delete clusterrolebinding metrics-auth-rolebinding
|
||||||
|
|
||||||
|
# 2. Check metrics-auth-role
|
||||||
|
# Look for references to authentication.k8s.io and authorization.k8s.io
|
||||||
|
kubectl describe clusterrole metrics-auth-role
|
||||||
|
# Verify it's not being used by any other rolebindings:
|
||||||
|
kubectl get clusterrolebinding -o json \
|
||||||
|
| jq -r '.items[] | select(.roleRef.name=="metrics-auth-role") \
|
||||||
|
| .metadata.name'
|
||||||
|
# If the above returns nothing (role is not in use) and the role looks like the
|
||||||
|
# Barman Cloud one, delete it (see warnings section):
|
||||||
|
kubectl delete clusterrole metrics-auth-role
|
||||||
|
|
||||||
|
# 3. Check objectstore-viewer-role
|
||||||
|
# Look for barmancloud.cnpg.io API group or
|
||||||
|
# for `app.kubernetes.io/name: plugin-barman-cloud` label
|
||||||
|
kubectl describe clusterrole objectstore-viewer-role
|
||||||
|
# If it shows barmancloud.cnpg.io in API groups, delete it:
|
||||||
|
kubectl delete clusterrole objectstore-viewer-role
|
||||||
|
|
||||||
|
# 4. Check objectstore-editor-role
|
||||||
|
# Look for barmancloud.cnpg.io API group or
|
||||||
|
# for `app.kubernetes.io/name: plugin-barman-cloud` label
|
||||||
|
kubectl describe clusterrole objectstore-editor-role
|
||||||
|
# If it shows barmancloud.cnpg.io in API groups, delete it:
|
||||||
|
kubectl delete clusterrole objectstore-editor-role
|
||||||
|
|
||||||
|
# 5. Check metrics-reader (MOST DANGEROUS - very generic name)
|
||||||
|
# First, check if it's being used by any rolebindings OTHER than barman's:
|
||||||
|
kubectl get clusterrolebinding -o json | jq -r '.items[] \
|
||||||
|
| select(.roleRef.name=="metrics-reader") \
|
||||||
|
| "\(.metadata.name) -> \(.subjects[0].name) in \(.subjects[0].namespace)"'
|
||||||
|
# If this shows ANY rolebindings, review them carefully. Only proceed if
|
||||||
|
# they're all Barman-related. Then check the role itself:
|
||||||
|
kubectl describe clusterrole metrics-reader
|
||||||
|
# If it ONLY has nonResourceURLs: /metrics and NO other rolebindings use it,
|
||||||
|
# delete it:
|
||||||
|
kubectl delete clusterrole metrics-reader
|
||||||
|
```
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
The `metrics-reader` role is particularly dangerous to delete blindly. Many
|
||||||
|
monitoring systems use this exact name. Only delete it if:
|
||||||
|
|
||||||
|
1. You've verified it ONLY grants access to `/metrics`
|
||||||
|
2. No other rolebindings reference it (checked with the jq command above)
|
||||||
|
3. You're certain it was created by the Barman Cloud plugin
|
||||||
|
|
||||||
|
If you're unsure, it's safer to leave it and let the new
|
||||||
|
`barman-plugin-metrics-reader` role coexist with it.
|
||||||
|
:::
|
||||||
|
|
||||||
|
If any resource is not found during the `describe` command, that's okay - it
|
||||||
|
means it was never created or already deleted. Simply skip the delete command
|
||||||
|
for that resource.
|
||||||
|
|
||||||
|
### Step 3: Delete Old Namespace-scoped Resources
|
||||||
|
|
||||||
|
Delete the old namespace-scoped resources in the `cnpg-system` namespace:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Delete the old leader-election resources
|
||||||
|
kubectl delete role leader-election-role -n cnpg-system
|
||||||
|
kubectl delete rolebinding leader-election-rolebinding -n cnpg-system
|
||||||
|
```
|
||||||
|
|
||||||
|
If any resource is not found, that's okay - it means it was never created or
|
||||||
|
already deleted.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
|
||||||
|
- **Permissions:** If you have custom RBAC rules or tools that reference the
|
||||||
|
old resource names, they will need to be updated.
|
||||||
|
- **External Users:** If end users have been granted the
|
||||||
|
`objectstore-viewer-role` or `objectstore-editor-role`, they will need to be
|
||||||
|
re-granted the new role names (`barman-plugin-objectstore-viewer-role` and
|
||||||
|
`barman-plugin-objectstore-editor-role`).
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
After migration, verify that the new resources are created:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check cluster-scoped resources
|
||||||
|
kubectl get clusterrole | grep barman
|
||||||
|
kubectl get clusterrolebinding | grep barman
|
||||||
|
|
||||||
|
# Check namespace-scoped resources
|
||||||
|
kubectl get role,rolebinding -n cnpg-system | grep barman
|
||||||
|
```
|
||||||
|
|
||||||
|
You should see the new prefixed resource names.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Plugin Not Starting After Migration
|
||||||
|
|
||||||
|
If the plugin fails to start after migration, check:
|
||||||
|
|
||||||
|
1. **ServiceAccount permissions:** Ensure the `plugin-barman-cloud` ServiceAccount is bound to the new roles:
|
||||||
|
```bash
|
||||||
|
kubectl get clusterrolebinding barman-plugin-metrics-auth-rolebinding -o yaml
|
||||||
|
kubectl get rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system -o yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Role references:** Verify that the rolebindings reference the correct role names:
|
||||||
|
```bash
|
||||||
|
kubectl describe rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system
|
||||||
|
kubectl describe clusterrolebinding barman-plugin-metrics-auth-rolebinding
|
||||||
|
```
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
If you encounter issues during migration, please open an issue on the [GitHub
|
||||||
|
repository](https://github.com/cloudnative-pg/plugin-barman-cloud/issues).
|
||||||
38
web/versioned_docs/version-0.10.0/retention.md
Normal file
38
web/versioned_docs/version-0.10.0/retention.md
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 60
|
||||||
|
---
|
||||||
|
|
||||||
|
# Retention Policies
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
The Barman Cloud Plugin supports **automated cleanup of obsolete backups** via
|
||||||
|
retention policies, configured in the `.spec.retentionPolicy` field of the
|
||||||
|
`ObjectStore` resource.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
This feature uses the `barman-cloud-backup-delete` command with the
|
||||||
|
`--retention-policy "RECOVERY WINDOW OF {{ value }} {{ unit }}"` syntax.
|
||||||
|
:::
|
||||||
|
|
||||||
|
#### Example: 30-Day Retention Policy
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: my-store
|
||||||
|
spec:
|
||||||
|
[...]
|
||||||
|
retentionPolicy: "30d"
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
A **recovery window retention policy** ensures the cluster can be restored to
|
||||||
|
any point in time between the calculated *Point of Recoverability* (PoR) and
|
||||||
|
the latest WAL archive. The PoR is defined as `current time - recovery window`.
|
||||||
|
The **first valid backup** is the most recent backup completed before the PoR.
|
||||||
|
Backups older than that are marked as *obsolete* and deleted after the next
|
||||||
|
backup completes.
|
||||||
|
:::
|
||||||
|
|
||||||
591
web/versioned_docs/version-0.10.0/troubleshooting.md
Normal file
591
web/versioned_docs/version-0.10.0/troubleshooting.md
Normal file
@ -0,0 +1,591 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 90
|
||||||
|
---
|
||||||
|
|
||||||
|
# Troubleshooting
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
This guide helps you diagnose and resolve common issues with the Barman Cloud
|
||||||
|
plugin.
|
||||||
|
|
||||||
|
:::important
|
||||||
|
We are continuously improving the integration between CloudNativePG and the
|
||||||
|
Barman Cloud plugin as it moves toward greater stability and maturity. For this
|
||||||
|
reason, we recommend using the latest available version of both components.
|
||||||
|
See the [*Requirements* section](intro.md#requirements) for details.
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::note
|
||||||
|
The following commands assume you installed the CloudNativePG operator in
|
||||||
|
the default `cnpg-system` namespace. If you installed it in a different
|
||||||
|
namespace, adjust the commands accordingly.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Viewing Logs
|
||||||
|
|
||||||
|
To troubleshoot effectively, you’ll often need to review logs from multiple
|
||||||
|
sources:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# View operator logs (includes plugin interaction logs)
|
||||||
|
kubectl logs -n cnpg-system deployment/cnpg-controller-manager -f
|
||||||
|
|
||||||
|
# View plugin manager logs
|
||||||
|
kubectl logs -n cnpg-system deployment/barman-cloud -f
|
||||||
|
|
||||||
|
# View sidecar container logs (Barman Cloud operations)
|
||||||
|
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud -f
|
||||||
|
|
||||||
|
# View all containers in a pod
|
||||||
|
kubectl logs -n <namespace> <cluster-pod-name> --all-containers=true
|
||||||
|
|
||||||
|
# View previous container logs (if container restarted)
|
||||||
|
kubectl logs -n <namespace> <cluster-pod-name> -c plugin-barman-cloud --previous
|
||||||
|
```
|
||||||
|
|
||||||
|
## Common Issues
|
||||||
|
|
||||||
|
### Plugin Installation Issues
|
||||||
|
|
||||||
|
#### Plugin pods not starting
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- Plugin pods stuck in `CrashLoopBackOff` or `Error`
|
||||||
|
- Plugin deployment not ready
|
||||||
|
|
||||||
|
**Possible causes and solutions:**
|
||||||
|
|
||||||
|
1. **Certificate issues**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check if cert-manager is installed and running
|
||||||
|
kubectl get pods -n cert-manager
|
||||||
|
|
||||||
|
# Check if the plugin certificate is created
|
||||||
|
kubectl get certificates -n cnpg-system
|
||||||
|
```
|
||||||
|
|
||||||
|
If cert-manager is not installed, install it first:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Note: other installation methods for cert-manager are available
|
||||||
|
kubectl apply -f \
|
||||||
|
https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
If you are using your own certificates without cert-manager, you will need
|
||||||
|
to verify the entire certificate chain yourself.
|
||||||
|
|
||||||
|
|
||||||
|
2. **Image pull errors**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check pod events for image pull errors
|
||||||
|
kubectl describe pod -n cnpg-system -l app=barman-cloud
|
||||||
|
```
|
||||||
|
|
||||||
|
Verify the image exists and you have proper credentials if using a private
|
||||||
|
registry.
|
||||||
|
|
||||||
|
|
||||||
|
3. **Resource constraints**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check node resources
|
||||||
|
kubectl top nodes
|
||||||
|
kubectl describe nodes
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure your cluster has sufficient CPU and memory resources.
|
||||||
|
|
||||||
|
### Backup Failures
|
||||||
|
|
||||||
|
#### Quick Backup Troubleshooting Checklist
|
||||||
|
|
||||||
|
When a backup fails, follow these steps in order:
|
||||||
|
|
||||||
|
1. **Check backup status**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get backups.postgresql.cnpg.io -n <namespace>
|
||||||
|
```
|
||||||
|
2. **Get error details and target pod**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl describe backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name>
|
||||||
|
|
||||||
|
kubectl get backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name> \
|
||||||
|
-o jsonpath='{.status.instanceID.podName}'
|
||||||
|
```
|
||||||
|
3. **Check the target pod’s sidecar logs**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name> \
|
||||||
|
-o jsonpath='{.status.instanceID.podName}')
|
||||||
|
|
||||||
|
kubectl logs \
|
||||||
|
-n <namespace> $TARGET_POD -c plugin-barman-cloud \
|
||||||
|
--tail=100 | grep -E "ERROR|FATAL|panic"
|
||||||
|
```
|
||||||
|
4. **Check cluster events**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get events -n <namespace> \
|
||||||
|
--field-selector involvedObject.name=<cluster-name> \
|
||||||
|
--sort-by='.lastTimestamp'
|
||||||
|
```
|
||||||
|
5. **Verify plugin is running**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get pods \
|
||||||
|
-n cnpg-system -l app=barman-cloud
|
||||||
|
```
|
||||||
|
6. **Check operator logs**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl logs \
|
||||||
|
-n cnpg-system deployment/cnpg-controller-manager \
|
||||||
|
--tail=100 | grep -i "backup\|plugin"
|
||||||
|
```
|
||||||
|
7. **Check plugin manager logs**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl logs \
|
||||||
|
-n cnpg-system deployment/barman-cloud --tail=100
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Backup job fails immediately
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- Backup pods terminate with error
|
||||||
|
- No backup files appear in object storage
|
||||||
|
- Backup shows `failed` phase with various error messages
|
||||||
|
|
||||||
|
**Common failure modes and solutions:**
|
||||||
|
|
||||||
|
1. **"requested plugin is not available" errors**
|
||||||
|
|
||||||
|
```
|
||||||
|
requested plugin is not available: barman
|
||||||
|
requested plugin is not available: barman-cloud
|
||||||
|
requested plugin is not available: barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cause:** The plugin name in the Cluster configuration doesn’t match the
|
||||||
|
deployed plugin, or the plugin isn’t registered.
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
|
||||||
|
a. **Check plugin registration:**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# If you have the `cnpg` plugin installed (v1.27.0+)
|
||||||
|
kubectl cnpg status -n <namespace> <cluster-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
Look for the "Plugins status" section:
|
||||||
|
```
|
||||||
|
Plugins status
|
||||||
|
Name Version Status Reported Operator Capabilities
|
||||||
|
---- ------- ------ ------------------------------
|
||||||
|
barman-cloud.cloudnative-pg.io 0.6.0 N/A Reconciler Hooks, Lifecycle Service
|
||||||
|
```
|
||||||
|
|
||||||
|
b. **Verify plugin name in `Cluster` spec**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
spec:
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
        barmanObjectName: <your-objectstore-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
c. **Check plugin deployment is running**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl get deployment -n cnpg-system barman-cloud
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **"rpc error: code = Unknown desc = panic caught: assignment to entry in nil map" errors**
|
||||||
|
|
||||||
|
**Cause:** Misconfiguration in the `ObjectStore` (e.g., typo or missing field).
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
|
||||||
|
- Review sidecar logs for details
|
||||||
|
- Verify `ObjectStore` configuration and secrets
|
||||||
|
- Common issues include:
|
||||||
|
- Missing or incorrect secret references
|
||||||
|
- Typos in configuration parameters
|
||||||
|
- Missing required environment variables in secrets
|
||||||
|
|
||||||
|
#### Backup performance issues
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- Backups take extremely long
|
||||||
|
- Backups timeout
|
||||||
|
|
||||||
|
**Plugin-specific considerations:**
|
||||||
|
|
||||||
|
1. **Check `ObjectStore` parallelism settings**
|
||||||
|
- Adjust `maxParallel` in `ObjectStore` configuration
|
||||||
|
- Monitor sidecar container resource usage during backups
|
||||||
|
|
||||||
|
2. **Verify plugin resource allocation**
|
||||||
|
- Check if the sidecar container has sufficient CPU/memory
|
||||||
|
- Review plugin container logs for resource-related warnings
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For Barman-specific features like compression, encryption, and performance
|
||||||
|
tuning, refer to the [Barman documentation](https://docs.pgbarman.org/latest/).
|
||||||
|
:::
|
||||||
|
|
||||||
|
### WAL Archiving Issues
|
||||||
|
|
||||||
|
#### WAL archiving stops
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- WAL files accumulate on the primary
|
||||||
|
- Cluster shows WAL archiving warnings
|
||||||
|
- Sidecar logs show WAL errors
|
||||||
|
|
||||||
|
**Debugging steps:**
|
||||||
|
|
||||||
|
1. **Check plugin sidecar logs for WAL archiving errors**
|
||||||
|
```sh
|
||||||
|
# Check recent WAL archive operations in sidecar
|
||||||
|
kubectl logs -n <namespace> <primary-pod> -c plugin-barman-cloud \
|
||||||
|
--tail=50 | grep -i wal
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check ObjectStore configuration for WAL settings**
|
||||||
|
- Ensure ObjectStore has proper WAL retention settings
|
||||||
|
- Verify credentials have permissions for WAL operations
|
||||||
|
|
||||||
|
### Restore Issues
|
||||||
|
|
||||||
|
#### Restore fails during recovery
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- New cluster stuck in recovery
|
||||||
|
- Plugin sidecar shows restore errors
|
||||||
|
- PostgreSQL won’t start
|
||||||
|
|
||||||
|
**Debugging steps:**
|
||||||
|
|
||||||
|
1. **Check plugin sidecar logs during restore**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check the sidecar logs on the recovering cluster pods
|
||||||
|
kubectl logs -n <namespace> <cluster-pod-name> \
|
||||||
|
-c plugin-barman-cloud --tail=100
|
||||||
|
|
||||||
|
# Look for restore-related errors
|
||||||
|
kubectl logs -n <namespace> <cluster-pod-name> \
|
||||||
|
-c plugin-barman-cloud | grep -E "restore|recovery|ERROR"
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Verify plugin can access backups**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check if `ObjectStore` is properly configured for restore
|
||||||
|
kubectl get objectstores.barmancloud.cnpg.io \
|
||||||
|
-n <namespace> <objectstore-name> -o yaml
|
||||||
|
|
||||||
|
# Check PostgreSQL recovery logs
|
||||||
|
kubectl logs -n <namespace> <cluster-pod> \
|
||||||
|
-c postgres | grep -i recovery
|
||||||
|
```
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
For detailed Barman restore operations and troubleshooting, refer to the
|
||||||
|
[Barman documentation](https://docs.pgbarman.org/latest/barman-cloud-restore.html).
|
||||||
|
:::
|
||||||
|
|
||||||
|
#### Point-in-time recovery (PITR) configuration issues
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- PITR doesn’t reach target time
|
||||||
|
- WAL access errors
|
||||||
|
- Recovery halts early
|
||||||
|
|
||||||
|
**Debugging steps:**
|
||||||
|
|
||||||
|
1. **Verify PITR configuration in the `Cluster` spec**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: <cluster-restore-name>
|
||||||
|
spec:
|
||||||
|
storage:
|
||||||
|
size: 1Gi
|
||||||
|
|
||||||
|
bootstrap:
|
||||||
|
recovery:
|
||||||
|
source: origin
|
||||||
|
recoveryTarget:
|
||||||
|
targetTime: "2024-01-15T10:30:00Z"
|
||||||
|
|
||||||
|
externalClusters:
|
||||||
|
- name: origin
|
||||||
|
plugin:
|
||||||
|
enabled: true
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: <object-store-name>
|
||||||
|
serverName: <source-cluster-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check sidecar logs for WAL-related errors**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
kubectl logs -n <namespace> <cluster-pod> \
|
||||||
|
-c plugin-barman-cloud | grep -i wal
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
Timestamps without an explicit timezone suffix
|
||||||
|
(e.g., `2024-01-15 10:30:00`) are interpreted as UTC.
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
Always specify an explicit timezone in your timestamp to avoid ambiguity.
|
||||||
|
For example, use `2024-01-15T10:30:00Z` or `2024-01-15T10:30:00+02:00`
|
||||||
|
instead of `2024-01-15 10:30:00`.
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::note
|
||||||
|
For detailed PITR configuration and WAL management, see the
|
||||||
|
[Barman PITR documentation](https://docs.pgbarman.org/latest/).
|
||||||
|
:::
|
||||||
|
|
||||||
|
### Plugin Configuration Issues
|
||||||
|
|
||||||
|
#### Plugin cannot connect to object storage
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
|
||||||
|
- Sidecar logs show connection errors
|
||||||
|
- Backups fail with authentication or network errors
|
||||||
|
- `ObjectStore` resource reports errors
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
|
||||||
|
1. **Verify `ObjectStore` CRD configuration and secrets**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check ObjectStore resource status
|
||||||
|
kubectl get objectstores.barmancloud.cnpg.io \
|
||||||
|
-n <namespace> <objectstore-name> -o yaml
|
||||||
|
|
||||||
|
# Verify the secret exists and has correct keys for your provider
|
||||||
|
kubectl get secret -n <namespace> <secret-name> \
|
||||||
|
-o jsonpath='{.data}' | jq 'keys'
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check sidecar logs for connectivity issues**
|
||||||
|
```sh
|
||||||
|
kubectl logs -n <namespace> <cluster-pod> \
|
||||||
|
-c plugin-barman-cloud | grep -E "connect|timeout|SSL|cert"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Adjust provider-specific settings (endpoint, path style, etc.)**
|
||||||
|
- See [Object Store Configuration](object_stores.md) for provider-specific settings
|
||||||
|
- Ensure `endpointURL` and `s3UsePathStyle` match your storage type
|
||||||
|
- Verify network policies allow egress to your storage provider
|
||||||
|
|
||||||
|
## Diagnostic Commands
|
||||||
|
|
||||||
|
### Using the `cnpg` plugin for `kubectl`
|
||||||
|
|
||||||
|
The `cnpg` plugin for `kubectl` provides extended debugging capabilities.
|
||||||
|
Keep it updated:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Install or update the `cnpg` plugin
|
||||||
|
kubectl krew install cnpg
|
||||||
|
# Or install using an alternative method: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install
|
||||||
|
|
||||||
|
# Check plugin status (requires CNPG 1.27.0+)
|
||||||
|
kubectl cnpg status <cluster-name> -n <namespace>
|
||||||
|
|
||||||
|
# View cluster status in detail
|
||||||
|
kubectl cnpg status <cluster-name> -n <namespace> --verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
If problems persist:
|
||||||
|
|
||||||
|
1. **Check the documentation**
|
||||||
|
|
||||||
|
- [Installation Guide](installation.mdx)
|
||||||
|
- [Object Store Configuration](object_stores.md) (for provider-specific settings)
|
||||||
|
- [Usage Examples](usage.md)
|
||||||
|
|
||||||
|
|
||||||
|
2. **Gather diagnostic information**
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Create a diagnostic bundle (⚠️ sanitize these before sharing!)
|
||||||
|
kubectl get objectstores.barmancloud.cnpg.io -A -o yaml > /tmp/objectstores.yaml
|
||||||
|
kubectl get clusters.postgresql.cnpg.io -A -o yaml > /tmp/clusters.yaml
|
||||||
|
kubectl logs -n cnpg-system deployment/barman-cloud --tail=1000 > /tmp/plugin.log
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
3. **Community support**
|
||||||
|
|
||||||
|
- CloudNativePG Slack: [#cloudnativepg-users](https://cloud-native.slack.com/messages/cloudnativepg-users)
|
||||||
|
- GitHub Issues: [plugin-barman-cloud](https://github.com/cloudnative-pg/plugin-barman-cloud/issues)
|
||||||
|
|
||||||
|
|
||||||
|
4. **Include when reporting**
|
||||||
|
|
||||||
|
- CloudNativePG version
|
||||||
|
- Plugin version
|
||||||
|
- Kubernetes version
|
||||||
|
- Cloud provider and region
|
||||||
|
- Relevant configuration (⚠️ sanitize/redact sensitive information)
|
||||||
|
- Error messages and logs
|
||||||
|
- Steps to reproduce
|
||||||
|
|
||||||
|
## Known Issues and Limitations
|
||||||
|
|
||||||
|
### Current Known Issues
|
||||||
|
|
||||||
|
1. **Migration compatibility**: After migrating from in-tree backup to the
|
||||||
|
plugin, the `kubectl cnpg backup` command syntax has changed
|
||||||
|
([#353](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/353)):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Old command (in-tree, no longer works after migration)
|
||||||
|
kubectl cnpg backup -n <namespace> <cluster-name> \
|
||||||
|
--method=barmanObjectStore
|
||||||
|
|
||||||
|
# New command (plugin-based)
|
||||||
|
kubectl cnpg backup -n <namespace> <cluster-name> \
|
||||||
|
--method=plugin --plugin-name=barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin Limitations
|
||||||
|
|
||||||
|
1. **Installation method**: Currently only supports manifest and Kustomize
|
||||||
|
installation ([#351](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/351) -
|
||||||
|
Helm chart requested)
|
||||||
|
|
||||||
|
2. **Sidecar resource sharing**: The plugin sidecar container shares pod
|
||||||
|
resources with PostgreSQL
|
||||||
|
|
||||||
|
3. **Plugin restart behavior**: Restarting the sidecar container requires
|
||||||
|
restarting the entire PostgreSQL pod
|
||||||
|
|
||||||
|
## Recap of General Debugging Steps
|
||||||
|
|
||||||
|
### Check Backup Status and Identify the Target Instance
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# List all backups and their status
|
||||||
|
kubectl get backups.postgresql.cnpg.io -n <namespace>
|
||||||
|
|
||||||
|
# Get detailed backup information including error messages and target instance
|
||||||
|
kubectl describe backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name>
|
||||||
|
|
||||||
|
# Extract the target pod name from a failed backup
|
||||||
|
kubectl get backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name> \
|
||||||
|
-o jsonpath='{.status.instanceID.podName}'
|
||||||
|
|
||||||
|
# Get more details including the target pod, method, phase, and error
|
||||||
|
kubectl get backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name> \
|
||||||
|
-o jsonpath='Pod: {.status.instanceID.podName}{"\n"}Method: {.status.method}{"\n"}Phase: {.status.phase}{"\n"}Error: {.status.error}{"\n"}'
|
||||||
|
|
||||||
|
# Check the cluster status for backup-related information
|
||||||
|
kubectl cnpg status <cluster-name> -n <namespace> --verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Sidecar Logs on the Backup Target Pod
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Identify which pod was the backup target (from the previous step)
|
||||||
|
TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \
|
||||||
|
-n <namespace> <backup-name> \
|
||||||
|
-o jsonpath='{.status.instanceID.podName}')
|
||||||
|
echo "Backup target pod: $TARGET_POD"
|
||||||
|
|
||||||
|
# Check the sidecar logs on the specific target pod
|
||||||
|
kubectl logs -n <namespace> $TARGET_POD \
|
||||||
|
-c plugin-barman-cloud --tail=100
|
||||||
|
|
||||||
|
# Follow the logs in real time
|
||||||
|
kubectl logs -n <namespace> $TARGET_POD \
|
||||||
|
-c plugin-barman-cloud -f
|
||||||
|
|
||||||
|
# Check for specific errors in the target pod around the backup time
|
||||||
|
kubectl logs -n <namespace> $TARGET_POD \
|
||||||
|
-c plugin-barman-cloud --since=10m | grep -E "ERROR|FATAL|panic|failed"
|
||||||
|
|
||||||
|
# Alternative: List all cluster pods and their roles
|
||||||
|
kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> \
|
||||||
|
-o custom-columns=NAME:.metadata.name,ROLE:.metadata.labels.cnpg\\.io/instanceRole,INSTANCE:.metadata.labels.cnpg\\.io/instanceName
|
||||||
|
|
||||||
|
# Check sidecar logs on ALL cluster pods (if the target is unclear)
|
||||||
|
for pod in $(kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> -o name); do
|
||||||
|
echo "=== Checking $pod ==="
|
||||||
|
kubectl logs -n <namespace> $pod -c plugin-barman-cloud \
|
||||||
|
--tail=20 | grep -i error || echo "No errors found"
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Events for Backup-Related Issues
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check events for the cluster
|
||||||
|
kubectl get events -n <namespace> \
|
||||||
|
--field-selector involvedObject.name=<cluster-name>
|
||||||
|
|
||||||
|
# Check events for failed backups
|
||||||
|
kubectl get events -n <namespace> \
|
||||||
|
--field-selector involvedObject.kind=Backup
|
||||||
|
|
||||||
|
# Get all recent events in the namespace
|
||||||
|
kubectl get events -n <namespace> --sort-by='.lastTimestamp' | tail -20
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verify `ObjectStore` Configuration
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Check the ObjectStore resource
|
||||||
|
kubectl get objectstores.barmancloud.cnpg.io \
|
||||||
|
-n <namespace> <objectstore-name> -o yaml
|
||||||
|
|
||||||
|
# Verify the secret exists and has the correct keys
|
||||||
|
kubectl get secret -n <namespace> <secret-name> -o yaml
|
||||||
|
# Alternatively
|
||||||
|
kubectl get secret -n <namespace> <secret-name> -o jsonpath='{.data}' | jq 'keys'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Common Error Messages and Solutions
|
||||||
|
|
||||||
|
* **"AccessDenied" or "403 Forbidden"** — Check cloud credentials and bucket permissions.
|
||||||
|
* **"NoSuchBucket"** — Verify the bucket exists and the endpoint URL is correct.
|
||||||
|
* **"Connection timeout"** — Check network connectivity and firewall rules.
|
||||||
|
* **"SSL certificate problem"** — For self-signed certificates, verify the CA bundle configuration.
|
||||||
|
|
||||||
16
web/versioned_docs/version-0.10.0/upgrades.mdx
Normal file
16
web/versioned_docs/version-0.10.0/upgrades.mdx
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 25
|
||||||
|
---
|
||||||
|
|
||||||
|
# Upgrades
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
You can upgrade the plugin simply by installing the new version. Unless
|
||||||
|
explicitly stated below or in the release notes, no special steps are required.
|
||||||
|
|
||||||
|
## Upgrading to version 0.8.x from previous versions
|
||||||
|
|
||||||
|
Version **0.8.0** introduces breaking changes to resource naming.
|
||||||
|
To complete the upgrade successfully, follow the instructions in the
|
||||||
|
["Resource name migration guide"](resource-name-migration.md).
|
||||||
283
web/versioned_docs/version-0.10.0/usage.md
Normal file
283
web/versioned_docs/version-0.10.0/usage.md
Normal file
@ -0,0 +1,283 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 30
|
||||||
|
---
|
||||||
|
|
||||||
|
# Using the Barman Cloud Plugin
|
||||||
|
|
||||||
|
<!-- SPDX-License-Identifier: CC-BY-4.0 -->
|
||||||
|
|
||||||
|
After [installing the plugin](installation.mdx) in the same namespace as the
|
||||||
|
CloudNativePG operator, enabling your PostgreSQL cluster to use the Barman
|
||||||
|
Cloud Plugin involves just a few steps:
|
||||||
|
|
||||||
|
- Defining the object store containing your WAL archive and base backups, using
|
||||||
|
your preferred [provider](object_stores.md)
|
||||||
|
- Instructing the Postgres cluster to use the Barman Cloud Plugin
|
||||||
|
|
||||||
|
From that moment, you’ll be able to issue on-demand backups or define a backup
|
||||||
|
schedule, as well as rely on the object store for recovery operations.
|
||||||
|
|
||||||
|
The rest of this page details each step, using MinIO as object store provider.
|
||||||
|
|
||||||
|
## Defining the `ObjectStore`
|
||||||
|
|
||||||
|
An `ObjectStore` resource must be created for each object store used in your
|
||||||
|
PostgreSQL architecture. Here's an example configuration using MinIO:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
destinationPath: s3://backups/
|
||||||
|
endpointURL: http://minio:9000
|
||||||
|
s3Credentials:
|
||||||
|
accessKeyId:
|
||||||
|
name: minio
|
||||||
|
key: ACCESS_KEY_ID
|
||||||
|
secretAccessKey:
|
||||||
|
name: minio
|
||||||
|
key: ACCESS_SECRET_KEY
|
||||||
|
wal:
|
||||||
|
compression: gzip
|
||||||
|
```
|
||||||
|
|
||||||
|
The `.spec.configuration` schema follows the same format as the
|
||||||
|
[in-tree barman-cloud support](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration).
|
||||||
|
Refer to [the CloudNativePG documentation](https://cloudnative-pg.io/documentation/preview/backup_barmanobjectstore/)
|
||||||
|
for additional details.
|
||||||
|
|
||||||
|
:::important
|
||||||
|
The `serverName` parameter in the `ObjectStore` resource is retained solely for
|
||||||
|
API compatibility with the in-tree `barmanObjectStore` and must always be left empty.
|
||||||
|
When needed, use the `serverName` plugin parameter in the Cluster configuration instead.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Configuring WAL Archiving
|
||||||
|
|
||||||
|
Once the `ObjectStore` is defined, you can configure your PostgreSQL cluster
|
||||||
|
to archive WALs by referencing the store in the `.spec.plugins` section:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: cluster-example
|
||||||
|
spec:
|
||||||
|
instances: 3
|
||||||
|
imagePullPolicy: Always
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
isWALArchiver: true
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-store
|
||||||
|
storage:
|
||||||
|
size: 1Gi
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration enables both WAL archiving and data directory backups.
|
||||||
|
|
||||||
|
## Performing a Base Backup
|
||||||
|
|
||||||
|
Once WAL archiving is enabled, the cluster is ready for backups. Backups can be
|
||||||
|
created either declaratively (with YAML manifests) or imperatively (with the
|
||||||
|
`cnpg` plugin).
|
||||||
|
|
||||||
|
### Declarative approach (YAML manifest)
|
||||||
|
|
||||||
|
Create a backup resource by applying a YAML manifest:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Backup
|
||||||
|
metadata:
|
||||||
|
name: backup-example
|
||||||
|
spec:
|
||||||
|
cluster:
|
||||||
|
name: cluster-example
|
||||||
|
method: plugin
|
||||||
|
pluginConfiguration:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
|
||||||
|
### Imperative approach (using the `cnpg` plugin)
|
||||||
|
|
||||||
|
The quickest way to trigger an on-demand backup is with the `cnpg` plugin:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl cnpg backup -n <namespace> <cluster-name> \
|
||||||
|
--method=plugin \
|
||||||
|
--plugin-name=barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note Migration from in-tree backups
|
||||||
|
If you are migrating from the in-tree backup system, note the change in syntax:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Old command (in-tree backup)
|
||||||
|
kubectl cnpg backup -n <namespace> <cluster-name> --method=barmanObjectStore
|
||||||
|
|
||||||
|
# New command (plugin-based backup)
|
||||||
|
kubectl cnpg backup -n <namespace> <cluster-name> \
|
||||||
|
--method=plugin \
|
||||||
|
--plugin-name=barman-cloud.cloudnative-pg.io
|
||||||
|
```
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Restoring a Cluster
|
||||||
|
|
||||||
|
To restore a cluster from an object store, create a new `Cluster` resource that
|
||||||
|
references the store containing the backup. Below is an example configuration:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: cluster-restore
|
||||||
|
spec:
|
||||||
|
instances: 3
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
bootstrap:
|
||||||
|
recovery:
|
||||||
|
source: source
|
||||||
|
externalClusters:
|
||||||
|
- name: source
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-store
|
||||||
|
serverName: cluster-example
|
||||||
|
storage:
|
||||||
|
size: 1Gi
|
||||||
|
```
|
||||||
|
|
||||||
|
:::important
|
||||||
|
The above configuration does **not** enable WAL archiving for the restored cluster.
|
||||||
|
:::
|
||||||
|
|
||||||
|
To enable WAL archiving for the restored cluster, include the `.spec.plugins`
|
||||||
|
section alongside the `externalClusters.plugin` section, as shown below:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: cluster-restore
|
||||||
|
spec:
|
||||||
|
instances: 3
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
bootstrap:
|
||||||
|
recovery:
|
||||||
|
source: source
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
isWALArchiver: true
|
||||||
|
parameters:
|
||||||
|
# Backup Object Store (push, read-write)
|
||||||
|
barmanObjectName: minio-store-bis
|
||||||
|
externalClusters:
|
||||||
|
- name: source
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
# Recovery Object Store (pull, read-only)
|
||||||
|
barmanObjectName: minio-store
|
||||||
|
serverName: cluster-example
|
||||||
|
storage:
|
||||||
|
size: 1Gi
|
||||||
|
```
|
||||||
|
|
||||||
|
The same object store may be used for both transaction log archiving and
|
||||||
|
restoring a cluster, or you can configure separate stores for these purposes.
|
||||||
|
|
||||||
|
## Configuring Replica Clusters
|
||||||
|
|
||||||
|
You can set up a distributed topology by combining the previously defined
|
||||||
|
configurations with the `.spec.replica` section. Below is an example of how to
|
||||||
|
define a replica cluster:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: postgresql.cnpg.io/v1
|
||||||
|
kind: Cluster
|
||||||
|
metadata:
|
||||||
|
name: cluster-dc-a
|
||||||
|
spec:
|
||||||
|
instances: 3
|
||||||
|
primaryUpdateStrategy: unsupervised
|
||||||
|
|
||||||
|
storage:
|
||||||
|
storageClass: csi-hostpath-sc
|
||||||
|
size: 1Gi
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- name: barman-cloud.cloudnative-pg.io
|
||||||
|
isWALArchiver: true
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-store-a
|
||||||
|
|
||||||
|
replica:
|
||||||
|
self: cluster-dc-a
|
||||||
|
primary: cluster-dc-a
|
||||||
|
source: cluster-dc-b
|
||||||
|
|
||||||
|
externalClusters:
|
||||||
|
- name: cluster-dc-a
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-store-a
|
||||||
|
|
||||||
|
- name: cluster-dc-b
|
||||||
|
plugin:
|
||||||
|
name: barman-cloud.cloudnative-pg.io
|
||||||
|
parameters:
|
||||||
|
barmanObjectName: minio-store-b
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuring the plugin instance sidecar
|
||||||
|
|
||||||
|
The Barman Cloud Plugin runs as a sidecar container next to each PostgreSQL
|
||||||
|
instance pod. It manages backup, WAL archiving, and restore processes.
|
||||||
|
|
||||||
|
Configuration comes from multiple `ObjectStore` resources:
|
||||||
|
|
||||||
|
1. The one referenced in the
|
||||||
|
`.spec.plugins` section of the `Cluster`. This is the
|
||||||
|
object store used for WAL archiving and base backups.
|
||||||
|
2. The one referenced in the external cluster
|
||||||
|
used in the `.spec.replica.source` section of the `Cluster`. This is
|
||||||
|
used by the log-shipping designated primary to fetch WAL files.
|
||||||
|
3. The one referenced in the
|
||||||
|
`.spec.bootstrap.recovery.source` section of the `Cluster`. Used by
|
||||||
|
the initial recovery job to create the cluster from an existing backup.
|
||||||
|
|
||||||
|
You can fine-tune sidecar behavior in the `.spec.instanceSidecarConfiguration`
|
||||||
|
of your ObjectStore. These settings apply to all PostgreSQL instances that use
|
||||||
|
this object store. Any updates take effect at the next `Cluster` reconciliation,
|
||||||
|
and may trigger a rollout of the `Cluster`.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: barmancloud.cnpg.io/v1
|
||||||
|
kind: ObjectStore
|
||||||
|
metadata:
|
||||||
|
name: minio-store
|
||||||
|
spec:
|
||||||
|
configuration:
|
||||||
|
# [...]
|
||||||
|
instanceSidecarConfiguration:
|
||||||
|
retentionPolicyIntervalSeconds: 1800
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "XXX"
|
||||||
|
cpu: "YYY"
|
||||||
|
limits:
|
||||||
|
memory: "XXX"
|
||||||
|
cpu: "YYY"
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
If more than one `ObjectStore` applies, the `instanceSidecarConfiguration` of
|
||||||
|
the one set in `.spec.plugins` has priority.
|
||||||
|
:::
|
||||||
8
web/versioned_sidebars/version-0.10.0-sidebars.json
Normal file
8
web/versioned_sidebars/version-0.10.0-sidebars.json
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"docs": [
|
||||||
|
{
|
||||||
|
"type": "autogenerated",
|
||||||
|
"dirName": "."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@ -1,4 +1,5 @@
|
|||||||
[
|
[
|
||||||
|
"0.10.0",
|
||||||
"0.9.0",
|
"0.9.0",
|
||||||
"0.8.0",
|
"0.8.0",
|
||||||
"0.7.0",
|
"0.7.0",
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user