feat(spike): restore (#29)
Signed-off-by: Armando Ruocco <armando.ruocco@enterprisedb.com>
Signed-off-by: Francesco Canovai <francesco.canovai@enterprisedb.com>
Signed-off-by: Marco Nenciarini <marco.nenciarini@enterprisedb.com>
Signed-off-by: Leonardo Cecchi <leonardo.cecchi@enterprisedb.com>
Co-authored-by: Francesco Canovai <francesco.canovai@enterprisedb.com>
Co-authored-by: Marco Nenciarini <marco.nenciarini@enterprisedb.com>
Co-authored-by: Leonardo Cecchi <leonardo.cecchi@enterprisedb.com>
parent 5d0038ecc5
commit 240077c771
Makefile (2 changed lines)

@@ -1,5 +1,5 @@
 # Image URL to use all building/pushing image targets
-IMG ?= controller:latest
+IMG ?= plugin-barman-cloud:latest
 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
 ENVTEST_K8S_VERSION = 1.31.0
cmd/manager/doc.go (new file, 2 lines)

@@ -0,0 +1,2 @@
+// Package main is the entrypoint for the plugin
+package main
cmd/manager/main.go (new file, 37 lines)

@@ -0,0 +1,37 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/spf13/cobra"
+
+	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/instance"
+	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/operator"
+	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/restore"
+)
+
+func main() {
+	cobra.EnableTraverseRunHooks = true
+
+	logFlags := &log.Flags{}
+	rootCmd := &cobra.Command{
+		Use: "manager [cmd]",
+		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+			logFlags.ConfigureLogging()
+			return nil
+		},
+	}
+
+	logFlags.AddFlags(rootCmd.PersistentFlags())
+
+	rootCmd.AddCommand(instance.NewCmd())
+	rootCmd.AddCommand(operator.NewCmd())
+	rootCmd.AddCommand(restore.NewCmd())
+
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
(deleted file)

@@ -1,98 +0,0 @@
-// Package main is the entrypoint of operator plugin
-package main
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/cloudnative-pg/machinery/pkg/log"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-
-	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator"
-)
-
-func main() {
-	cobra.EnableTraverseRunHooks = true
-
-	logFlags := &log.Flags{}
-	rootCmd := &cobra.Command{
-		Use:   "plugin-barman-cloud",
-		Short: "Starts the BarmanObjectStore reconciler and the Barman Cloud CNPG-i plugin",
-		RunE: func(cmd *cobra.Command, _ []string) error {
-			if len(viper.GetString("sidecar-image")) == 0 {
-				return fmt.Errorf("missing required SIDECAR_IMAGE environment variable")
-			}
-
-			return operator.Start(cmd.Context())
-		},
-		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
-			logFlags.ConfigureLogging()
-			return nil
-		},
-	}
-
-	logFlags.AddFlags(rootCmd.PersistentFlags())
-
-	rootCmd.Flags().String("metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
-		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
-	_ = viper.BindPFlag("metrics-bind-address", rootCmd.Flags().Lookup("metrics-bind-address"))
-
-	rootCmd.Flags().String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
-	_ = viper.BindPFlag("health-probe-bind-address", rootCmd.Flags().Lookup("health-probe-bind-address"))
-
-	rootCmd.Flags().Bool("leader-elect", false,
-		"Enable leader election for controller manager. "+
-			"Enabling this will ensure there is only one active controller manager.")
-	_ = viper.BindPFlag("leader-elect", rootCmd.Flags().Lookup("leader-elect"))
-
-	rootCmd.Flags().Bool("metrics-secure", true,
-		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
-	_ = viper.BindPFlag("metrics-secure", rootCmd.Flags().Lookup("metrics-secure"))
-
-	rootCmd.Flags().Bool("enable-http2", false,
-		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
-	_ = viper.BindPFlag("enable-http2", rootCmd.Flags().Lookup("enable-http2"))
-
-	rootCmd.Flags().String(
-		"plugin-path",
-		"",
-		"The plugins socket path",
-	)
-	_ = viper.BindPFlag("plugin-path", rootCmd.Flags().Lookup("plugin-path"))
-
-	rootCmd.Flags().String(
-		"server-cert",
-		"",
-		"The public key to be used for the server process",
-	)
-	_ = viper.BindPFlag("server-cert", rootCmd.Flags().Lookup("server-cert"))
-
-	rootCmd.Flags().String(
-		"server-key",
-		"",
-		"The key to be used for the server process",
-	)
-	_ = viper.BindPFlag("server-key", rootCmd.Flags().Lookup("server-key"))
-
-	rootCmd.Flags().String(
-		"client-cert",
-		"",
-		"The client public key to verify the connection",
-	)
-	_ = viper.BindPFlag("client-cert", rootCmd.Flags().Lookup("client-cert"))
-
-	rootCmd.Flags().String(
-		"server-address",
-		"",
-		"The address where to listen (i.e. 0:9090)",
-	)
-	_ = viper.BindPFlag("server-address", rootCmd.Flags().Lookup("server-address"))
-
-	_ = viper.BindEnv("sidecar-image", "SIDECAR_IMAGE")
-
-	if err := rootCmd.Execute(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
@@ -40,6 +40,14 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - postgresql.cnpg.io
+  resources:
+  - backups
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - rbac.authorization.k8s.io
   resources:
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.23 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.23.1 AS gobuilder
 ARG TARGETOS
 ARG TARGETARCH
 
@@ -12,9 +12,9 @@ COPY ../go.sum go.sum
 RUN go mod download
 
 # Copy the go source
-COPY ../cmd/operator/main.go cmd/operator/main.go
-COPY ../api api/
-COPY ../internal internal/
+COPY ../cmd/manager/main.go cmd/manager/main.go
+COPY ../api/ api/
+COPY ../internal/ internal/
 
 # Build
 # the GOARCH has not a default value to allow the binary be built according to the host where the command
@@ -22,7 +22,7 @@ COPY ../internal internal/
 # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
 # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
 RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \
-    CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/operator/main.go
+    CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/manager/main.go
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -42,7 +42,7 @@ LABEL summary="$SUMMARY" \
       release="1"
 
 WORKDIR /
-COPY --from=builder /workspace/manager .
+COPY --from=gobuilder /workspace/manager .
 USER 65532:65532
 
 ENTRYPOINT ["/manager"]
@@ -4,6 +4,32 @@
 # * instance plugin
 # Both components are built before going into a distroless container
 
+# Build the manager binary
+FROM --platform=$BUILDPLATFORM golang:1.23.1 AS gobuilder
+ARG TARGETOS
+ARG TARGETARCH
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY ../go.mod go.mod
+COPY ../go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY ../cmd/manager/main.go cmd/manager/main.go
+COPY ../api/ api/
+COPY ../internal/ internal/
+
+# Build
+# the GOARCH has not a default value to allow the binary be built according to the host where the command
+# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
+# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
+# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
+RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \
+    CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/manager/main.go
+
 # Build barman-cloud
 # pip will build everything inside /usr/ since this is the case
 # we should build and then copy every file into a destination that will
@@ -19,17 +45,6 @@ RUN mkdir /new-usr/ && \
     cp -r --parents /usr/local/lib/ /usr/lib/*-linux-gnu/ /usr/local/bin/ \
     /new-usr/
 
-# Build instance
-# This step builds a simple instance app that will manage and handle
-# the barman-cloud commands inside the sidecar
-FROM --platform=$BUILDPLATFORM golang:1.23.1 AS gobuilder
-ENV CGO_ENABLED=0
-COPY .. /src
-ARG TARGETOS
-ARG TARGETARCH
-RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \
-    GOOS=$TARGETOS GOARCH=$TARGETARCH go build -C /src -o /build/instance /src/cmd/instance/main.go
-
 # Joint process
 # Now we put everything that was build from the origin into our
 # distroless container
@@ -49,6 +64,6 @@ LABEL summary="$SUMMARY" \
       release="1"
 
 COPY --from=pythonbuilder /new-usr/* /usr/
-COPY --from=gobuilder /build/instance /usr/local/bin/instance
+COPY --from=gobuilder /workspace/manager /manager
 USER 26:26
-ENTRYPOINT ["/usr/local/bin/instance"]
+ENTRYPOINT ["/manager"]
@@ -4,7 +4,7 @@ metadata:
   name: cluster-example
 spec:
   instances: 3
+  imagePullPolicy: IfNotPresent
   plugins:
   - name: barman-cloud.cloudnative-pg.io
     parameters:
docs/examples/cluster-restore.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore
+spec:
+  instances: 3
+  imagePullPolicy: IfNotPresent
+
+  bootstrap:
+    recovery:
+      source: source
+
+  externalClusters:
+  - name: source
+    plugin:
+      name: barman-cloud.cloudnative-pg.io
+      parameters:
+        barmanObjectName: minio-store
+        serverName: cluster-example
+
+  storage:
+    size: 1Gi
go.mod (45 changed lines)

@@ -1,22 +1,25 @@
 module github.com/cloudnative-pg/plugin-barman-cloud
 
-go 1.22.0
+go 1.23
 
+toolchain go1.23.1
+
 require (
 	github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a
-	github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542
-	github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248
-	github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6
-	github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392
+	github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241031170209-ad2b0d78a230
+	github.com/cloudnative-pg/cnpg-i v0.0.0-20241030162745-80b6d07403c1
+	github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241014090747-e9c2b3738d19
+	github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b
 	github.com/onsi/ginkgo/v2 v2.20.2
 	github.com/onsi/gomega v1.34.2
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/viper v1.19.0
 	google.golang.org/grpc v1.67.1
-	k8s.io/api v0.31.1
-	k8s.io/apimachinery v0.31.1
-	k8s.io/client-go v0.31.1
-	sigs.k8s.io/controller-runtime v0.19.0
+	k8s.io/api v0.31.2
+	k8s.io/apimachinery v0.31.2
+	k8s.io/client-go v0.31.2
+	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
+	sigs.k8s.io/controller-runtime v0.19.1
 )
 
 require (
@@ -27,6 +30,7 @@ require (
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@@ -70,8 +74,8 @@ require (
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 // indirect
-	github.com/prometheus/client_golang v1.20.3 // indirect
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.59.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
@@ -98,28 +102,27 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
-	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.25.0 // indirect
-	golang.org/x/term v0.24.0 // indirect
-	golang.org/x/text v0.18.0 // indirect
-	golang.org/x/time v0.6.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/term v0.25.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/time v0.7.0 // indirect
 	golang.org/x/tools v0.25.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.31.1 // indirect
-	k8s.io/apiserver v0.31.1 // indirect
-	k8s.io/component-base v0.31.1 // indirect
+	k8s.io/apiextensions-apiserver v0.31.2 // indirect
+	k8s.io/apiserver v0.31.2 // indirect
+	k8s.io/component-base v0.31.2 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect
-	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
go.sum (78 changed lines)

@@ -14,16 +14,18 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b h1:LZ9tIgKmWb8ZvyLg/J8ExXtmBtEWP2dr3Y4TU4nCq/w=
+github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b/go.mod h1:mzd1EvoLYy16jJdne6/4nwhoj7t4IZ0MqJMEH4mla8Q=
 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME=
 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE=
-github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542 h1:IXf5lj+m4CBqzckQ9L/9hJ01JUoVw5N0FuPex0sVdVo=
-github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542/go.mod h1:L8M+kTGpz/eaLZj46+4sARvO/vDYlo/m1xOigI/ghBA=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248 h1:eUGzb7YNjVLilwhgZoe4hDOO70fci3oqb/ZzQFbN3xg=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248/go.mod h1:K9/4eAT3rh2bKIWyujoN8BIPRXa4d1Ls+eBY8PE8y6w=
-github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6 h1:C4CU5fBTYTiJBPDqcgHpXSc5IvRTy+5KTaFZzdKHfAQ=
-github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6/go.mod h1:mHEVy/Guae+rij1qlgwHg+lyFKDX48qjTL4lAqE7OJs=
-github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0=
-github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM=
+github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241031170209-ad2b0d78a230 h1:zRqm1WUMOqkPWGyvtvCAdWlQ+WTtb0iQA/rvCar27/E=
+github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241031170209-ad2b0d78a230/go.mod h1:La89zdElTqqZ5LXHFm/UjOwvS9iHSE8GuOW4fYUgHw8=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241030162745-80b6d07403c1 h1:v3Vr+FH5BXmS7Eqx17u51oijZ4T7y62vUMCUAr7CffE=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241030162745-80b6d07403c1/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
+github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241014090747-e9c2b3738d19 h1:qy+LrScvQpIwt4qeg9FfCJuoC9CbX/kpFGLF8vSobXg=
+github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241014090747-e9c2b3738d19/go.mod h1:X6r1fRuUEIAv4+5SSBY2RmQ201K6GcptOXgnmaX/8tY=
+github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg=
+github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -138,10 +140,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU=
-github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
-github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 h1:F/MALZ518KfI1zEg+Kg8/uTzoXKDyqw+LNC/5irJlJE=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
@@ -226,8 +228,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
 golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
 golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -238,16 +240,16 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -266,8 +268,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
 google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -284,18 +286,18 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
-k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
-k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
-k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
-k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
-k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c=
-k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM=
-k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
-k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
-k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8=
-k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w=
+k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
+k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
+k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
+k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
+k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
+k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
+k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
+k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
+k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
+k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
+k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
@@ -304,8 +306,8 @@ k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY
 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
-sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk=
+sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
@@ -1,28 +1,20 @@
-// Package main is the entrypoint of operator plugin
-package main
+// Package instance is the entrypoint of instance plugin
+package instance
 
 import (
 	"fmt"
-	"os"
 
-	"github.com/cloudnative-pg/machinery/pkg/log"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
 	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/instance"
 )
 
-func main() {
-	cobra.EnableTraverseRunHooks = true
-
-	logFlags := &log.Flags{}
-	rootCmd := &cobra.Command{
+// NewCmd creates a new instance command
+func NewCmd() *cobra.Command {
+	cmd := &cobra.Command{
 		Use:   "instance",
-		Short: "Starts the Barman Cloud CNPG-i sidecar plugin",
-		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
-			logFlags.ConfigureLogging()
-			return nil
-		},
+		Short: "Starts the Barman Cloud CNPG-I sidecar plugin",
 		RunE: func(cmd *cobra.Command, _ []string) error {
 			requiredSettings := []string{
 				"namespace",
@@ -42,8 +34,6 @@ func main() {
 		},
 	}
 
-	logFlags.AddFlags(rootCmd.PersistentFlags())
-
 	_ = viper.BindEnv("namespace", "NAMESPACE")
 	_ = viper.BindEnv("barman-object-name", "BARMAN_OBJECT_NAME")
 	_ = viper.BindEnv("cluster-name", "CLUSTER_NAME")
@@ -51,8 +41,5 @@ func main() {
 	_ = viper.BindEnv("pgdata", "PGDATA")
 	_ = viper.BindEnv("spool-directory", "SPOOL_DIRECTORY")
 
-	if err := rootCmd.Execute(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
+	return cmd
 }
internal/cmd/operator/main.go (new file, 88 lines)

@@ -0,0 +1,88 @@
+// Package operator is the entrypoint of operator plugin
+package operator
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+
+	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator"
+)
+
+// NewCmd creates a new operator command
+func NewCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "operator",
+		Short: "Starts the BarmanObjectStore reconciler and the Barman Cloud CNPG-i plugin",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			if len(viper.GetString("sidecar-image")) == 0 {
+				return fmt.Errorf("missing required SIDECAR_IMAGE environment variable")
+			}
+
+			return operator.Start(cmd.Context())
+		},
+		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+			return nil
+		},
+	}
+
+	cmd.Flags().String("metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
+		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
+	_ = viper.BindPFlag("metrics-bind-address", cmd.Flags().Lookup("metrics-bind-address"))
+
+	cmd.Flags().String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
+	_ = viper.BindPFlag("health-probe-bind-address", cmd.Flags().Lookup("health-probe-bind-address"))
+
+	cmd.Flags().Bool("leader-elect", false,
+		"Enable leader election for controller manager. "+
+			"Enabling this will ensure there is only one active controller manager.")
+	_ = viper.BindPFlag("leader-elect", cmd.Flags().Lookup("leader-elect"))
+
+	cmd.Flags().Bool("metrics-secure", true,
+		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
+	_ = viper.BindPFlag("metrics-secure", cmd.Flags().Lookup("metrics-secure"))
+
+	cmd.Flags().Bool("enable-http2", false,
+		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
+	_ = viper.BindPFlag("enable-http2", cmd.Flags().Lookup("enable-http2"))
+
+	cmd.Flags().String(
+		"plugin-path",
+		"",
+		"The plugins socket path",
+	)
+	_ = viper.BindPFlag("plugin-path", cmd.Flags().Lookup("plugin-path"))
+
+	cmd.Flags().String(
+		"server-cert",
+		"",
+		"The public key to be used for the server process",
+	)
+	_ = viper.BindPFlag("server-cert", cmd.Flags().Lookup("server-cert"))
+
+	cmd.Flags().String(
+		"server-key",
+		"",
+		"The key to be used for the server process",
+	)
+	_ = viper.BindPFlag("server-key", cmd.Flags().Lookup("server-key"))
+
+	cmd.Flags().String(
+		"client-cert",
+		"",
+		"The client public key to verify the connection",
+	)
+	_ = viper.BindPFlag("client-cert", cmd.Flags().Lookup("client-cert"))
+
+	cmd.Flags().String(
+		"server-address",
+		"",
+		"The address where to listen (i.e. 0:9090)",
+	)
+	_ = viper.BindPFlag("server-address", cmd.Flags().Lookup("server-address"))
+
+	_ = viper.BindEnv("sidecar-image", "SIDECAR_IMAGE")
+
+	return cmd
+}
internal/cmd/restore/main.go (new file, 45 lines)

@@ -0,0 +1,45 @@
+// Package restore is the entrypoint of restore capabilities
+package restore
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+
+	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/restore"
+)
+
+// NewCmd creates the "restore" subcommand
+func NewCmd() *cobra.Command {
+	cobra.EnableTraverseRunHooks = true
+
+	cmd := &cobra.Command{
+		Use:   "restore",
+		Short: "Starts the Barman Cloud CNPG-I sidecar plugin",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			requiredSettings := []string{
+				"namespace",
+				"cluster-name",
+				"pod-name",
+				"spool-directory",
+			}
+
+			for _, k := range requiredSettings {
+				if len(viper.GetString(k)) == 0 {
+					return fmt.Errorf("missing required %s setting", k)
+				}
+			}
+
+			return restore.Start(cmd.Context())
+		},
+	}
+
+	_ = viper.BindEnv("namespace", "NAMESPACE")
+	_ = viper.BindEnv("cluster-name", "CLUSTER_NAME")
+	_ = viper.BindEnv("pod-name", "POD_NAME")
+	_ = viper.BindEnv("pgdata", "PGDATA")
+	_ = viper.BindEnv("spool-directory", "SPOOL_DIRECTORY")
+
+	return cmd
+}
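The restore subcommand above takes its configuration from environment variables through the viper.BindEnv calls. As a quick illustration (a sketch, not part of the commit; the variable values are made-up examples), a test placed next to this package could verify that the four required settings resolve from NAMESPACE, CLUSTER_NAME, POD_NAME and SPOOL_DIRECTORY:

package restore

import (
	"testing"

	"github.com/spf13/viper"
)

// Sketch only: NewCmd binds each setting to its environment variable, so the
// required-settings check in RunE passes once these variables are exported.
func TestRequiredSettingsFromEnv(t *testing.T) {
	t.Setenv("NAMESPACE", "default")                     // example value
	t.Setenv("CLUSTER_NAME", "cluster-restore")          // example value
	t.Setenv("POD_NAME", "cluster-restore-1")            // example value
	t.Setenv("SPOOL_DIRECTORY", "/controller/wal-spool") // example value

	_ = NewCmd() // runs the viper.BindEnv calls

	for _, k := range []string{"namespace", "cluster-name", "pod-name", "spool-directory"} {
		if viper.GetString(k) == "" {
			t.Errorf("expected %q to be resolved from the environment", k)
		}
	}
}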
internal/cnpgi/common/common.go (new file, 72 lines)

@@ -0,0 +1,72 @@
+package common
+
+import (
+	"fmt"
+	"strings"
+
+	barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
+)
+
+// TODO: refactor.
+const (
+	// ScratchDataDirectory is the directory to be used for scratch data.
+	ScratchDataDirectory = "/controller"
+
+	// CertificatesDir location to store the certificates.
+	CertificatesDir = ScratchDataDirectory + "/certificates/"
+
+	// BarmanBackupEndpointCACertificateLocation is the location where the barman endpoint
+	// CA certificate is stored.
+	BarmanBackupEndpointCACertificateLocation = CertificatesDir + BarmanBackupEndpointCACertificateFileName
+
+	// BarmanBackupEndpointCACertificateFileName is the name of the file in which the barman endpoint
+	// CA certificate for backups is stored.
+	BarmanBackupEndpointCACertificateFileName = "backup-" + BarmanEndpointCACertificateFileName
+
+	// BarmanRestoreEndpointCACertificateFileName is the name of the file in which the barman endpoint
+	// CA certificate for restores is stored.
+	BarmanRestoreEndpointCACertificateFileName = "restore-" + BarmanEndpointCACertificateFileName
+
+	// BarmanEndpointCACertificateFileName is the name of the file in which the barman endpoint
+	// CA certificate is stored.
+	BarmanEndpointCACertificateFileName = "barman-ca.crt"
+)
+
+// GetRestoreCABundleEnv gets the enveronment variables to be used when custom
+// Object Store CA is present
+func GetRestoreCABundleEnv(configuration *barmanapi.BarmanObjectStoreConfiguration) []string {
+	var env []string
+
+	if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil {
+		env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", BarmanBackupEndpointCACertificateLocation))
+	} else if configuration.EndpointCA != nil && configuration.BarmanCredentials.Azure != nil {
+		env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", BarmanBackupEndpointCACertificateLocation))
+	}
+	return env
+}
+
+// MergeEnv merges all the values inside incomingEnv into env.
+func MergeEnv(env []string, incomingEnv []string) []string {
+	result := make([]string, len(env), len(env)+len(incomingEnv))
+	copy(result, env)
+
+	for _, incomingItem := range incomingEnv {
+		incomingKV := strings.SplitAfterN(incomingItem, "=", 2)
+		if len(incomingKV) != 2 {
+			continue
+		}
+
+		found := false
+		for idx, item := range result {
+			if strings.HasPrefix(item, incomingKV[0]) {
+				result[idx] = incomingItem
+				found = true
+			}
+		}
+		if !found {
+			result = append(result, incomingItem)
+		}
+	}
+
+	return result
+}
internal/cnpgi/common/doc.go (new file, 2 lines)

@@ -0,0 +1,2 @@
+// Package common contains reusable structs and methods for CNPGI plugins.
+package common
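To make the merge semantics of the new common package concrete, here is a small sketch (illustrative only, not part of the commit) of a test that could sit next to common.go: keys already present in the base environment are overridden in place, while new keys are appended.

package common

import (
	"reflect"
	"testing"
)

// Sketch only: MergeEnv overrides an existing KEY= entry and appends unknown keys.
func TestMergeEnvSketch(t *testing.T) {
	base := []string{"PGHOST=/var/run/postgresql", "AWS_CA_BUNDLE=/tmp/old-ca.crt"}
	incoming := []string{
		"AWS_CA_BUNDLE=" + BarmanBackupEndpointCACertificateLocation, // overrides the existing entry
		"AWS_REGION=eu-west-1",                                       // appended, no matching key in base
	}

	got := MergeEnv(base, incoming)
	want := []string{
		"PGHOST=/var/run/postgresql",
		"AWS_CA_BUNDLE=" + BarmanBackupEndpointCACertificateLocation,
		"AWS_REGION=eu-west-1",
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("MergeEnv() = %v, want %v", got, want)
	}
}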
@ -10,6 +10,7 @@ import (
|
|||||||
barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup"
|
barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup"
|
||||||
barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities"
|
barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities"
|
||||||
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
|
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
|
||||||
|
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
|
||||||
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
|
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
|
||||||
"github.com/cloudnative-pg/cnpg-i/pkg/backup"
|
"github.com/cloudnative-pg/cnpg-i/pkg/backup"
|
||||||
"github.com/cloudnative-pg/machinery/pkg/fileutils"
|
"github.com/cloudnative-pg/machinery/pkg/fileutils"
|
||||||
@ -19,6 +20,7 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
|
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
|
||||||
|
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
|
||||||
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
|
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -66,8 +68,16 @@ func (b BackupServiceImplementation) Backup(
|
|||||||
) (*backup.BackupResult, error) {
|
) (*backup.BackupResult, error) {
|
||||||
contextLogger := log.FromContext(ctx)
|
contextLogger := log.FromContext(ctx)
|
||||||
|
|
||||||
|
contextLogger.Info("Starting backup")
|
||||||
|
|
||||||
|
var cluster cnpgv1.Cluster
|
||||||
|
if err := b.Client.Get(ctx, b.ClusterObjectKey, &cluster); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
var objectStore barmancloudv1.ObjectStore
|
var objectStore barmancloudv1.ObjectStore
|
||||||
if err := b.Client.Get(ctx, b.BarmanObjectKey, &objectStore); err != nil {
|
if err := b.Client.Get(ctx, b.BarmanObjectKey, &objectStore); err != nil {
|
||||||
|
contextLogger.Error(err, "while getting object store", "key", b.BarmanObjectKey)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -78,6 +88,7 @@ func (b BackupServiceImplementation) Backup(
|
|||||||
|
|
||||||
capabilities, err := barmanCapabilities.CurrentCapabilities()
|
capabilities, err := barmanCapabilities.CurrentCapabilities()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
contextLogger.Error(err, "while getting capabilities")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
backupCmd := barmanBackup.NewBackupCommand(
|
backupCmd := barmanBackup.NewBackupCommand(
|
||||||
@ -88,53 +99,64 @@ func (b BackupServiceImplementation) Backup(
|
|||||||
// We need to connect to PostgreSQL and to do that we need
|
// We need to connect to PostgreSQL and to do that we need
|
||||||
// PGHOST (and the like) to be available
|
    // PGHOST (and the like) to be available
    osEnvironment := os.Environ()
-   caBundleEnvironment := getRestoreCABundleEnv(&objectStore.Spec.Configuration)
+   caBundleEnvironment := common.GetRestoreCABundleEnv(&objectStore.Spec.Configuration)
    env, err := barmanCredentials.EnvSetBackupCloudCredentials(
        ctx,
        b.Client,
        objectStore.Namespace,
        &objectStore.Spec.Configuration,
-       mergeEnv(osEnvironment, caBundleEnvironment))
+       common.MergeEnv(osEnvironment, caBundleEnvironment))
    if err != nil {
+       contextLogger.Error(err, "while setting backup cloud credentials")
        return nil, err
    }

+   serverName := cluster.Name
+   for _, plugin := range cluster.Spec.Plugins {
+       if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
+           if pluginServerName, ok := plugin.Parameters["serverName"]; ok {
+               serverName = pluginServerName
+           }
+       }
+   }

    backupName := fmt.Sprintf("backup-%v", pgTime.ToCompactISO8601(time.Now()))

    if err = backupCmd.Take(
        ctx,
        backupName,
-       b.InstanceName,
+       serverName,
        env,
        barmanCloudExecutor{},
        postgres.BackupTemporaryDirectory,
    ); err != nil {
+       contextLogger.Error(err, "while taking backup")
        return nil, err
    }

    executedBackupInfo, err := backupCmd.GetExecutedBackupInfo(
        ctx,
        backupName,
-       b.InstanceName,
+       serverName,
        barmanCloudExecutor{},
        env)
    if err != nil {
+       contextLogger.Error(err, "while getting executed backup info")
        return nil, err
    }

+   contextLogger.Info("Backup completed", "backup", executedBackupInfo.ID)
    return &backup.BackupResult{
        BackupId:   executedBackupInfo.ID,
        BackupName: executedBackupInfo.BackupName,
        StartedAt:  metav1.Time{Time: executedBackupInfo.BeginTime}.Unix(),
        StoppedAt:  metav1.Time{Time: executedBackupInfo.EndTime}.Unix(),
        BeginWal:   executedBackupInfo.BeginWal,
        EndWal:     executedBackupInfo.EndWal,
        BeginLsn:   executedBackupInfo.BeginLSN,
        EndLsn:     executedBackupInfo.EndLSN,
-       BackupLabelFile:   nil,
-       TablespaceMapFile: nil,
-       InstanceId:        b.InstanceName,
-       Online:            true,
+       InstanceId: b.InstanceName,
+       Online:     true,
        Metadata: map[string]string{
            "timeline": strconv.Itoa(executedBackupInfo.TimeLine),
            "version":  metadata.Data.Version,
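The serverName lookup added above reappears verbatim in the WAL archive and restore hooks below, each time behind a "TODO: refactor this code elsewhere" comment. A small helper along these lines (hypothetical, not part of this commit) would capture the fallback-to-cluster-name logic in one place:

// getServerName is a hypothetical helper, not part of this commit: it resolves the
// Barman serverName for a cluster, defaulting to the cluster name when the plugin
// parameter is not set.
func getServerName(cluster *cnpgv1.Cluster) string {
    serverName := cluster.Name
    for _, plugin := range cluster.Spec.Plugins {
        if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
            if pluginServerName, ok := plugin.Parameters["serverName"]; ok {
                serverName = pluginServerName
            }
        }
    }
    return serverName
}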
@@ -6,10 +6,8 @@ import (
    "fmt"
    "os"
    "path"
-   "strings"
    "time"

-   barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
    "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
    barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
    barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
@@ -21,13 +19,8 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/client"

    barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
-)
-const (
-   // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
-   // if present, requires the WAL archiver to check that the backup object
-   // store is empty.
-   CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
+   "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
+   "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)

// WALServiceImplementation is the implementation of the WAL Service
@@ -72,6 +65,21 @@ func (w WALServiceImplementation) Archive(
    ctx context.Context,
    request *wal.WALArchiveRequest,
) (*wal.WALArchiveResult, error) {
+   var cluster cnpgv1.Cluster
+   if err := w.Client.Get(ctx, w.ClusterObjectKey, &cluster); err != nil {
+       return nil, err
+   }
+
+   // TODO: refactor this code elsewhere
+   serverName := cluster.Name
+   for _, plugin := range cluster.Spec.Plugins {
+       if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
+           if pluginServerName, ok := plugin.Parameters["serverName"]; ok {
+               serverName = pluginServerName
+           }
+       }
+   }
+
    var objectStore barmancloudv1.ObjectStore
    if err := w.Client.Get(ctx, w.BarmanObjectKey, &objectStore); err != nil {
        return nil, err
@@ -95,13 +103,13 @@ func (w WALServiceImplementation) Archive(
        envArchive,
        w.SpoolDirectory,
        w.PGDataPath,
-       path.Join(w.PGDataPath, CheckEmptyWalArchiveFile),
+       path.Join(w.PGDataPath, metadata.CheckEmptyWalArchiveFile),
    )
    if err != nil {
        return nil, err
    }

-   options, err := arch.BarmanCloudWalArchiveOptions(ctx, &objectStore.Spec.Configuration, objectStore.Name)
+   options, err := arch.BarmanCloudWalArchiveOptions(ctx, &objectStore.Spec.Configuration, serverName)
    if err != nil {
        return nil, err
    }
@@ -117,6 +125,7 @@ func (w WALServiceImplementation) Archive(
}

// Restore implements the WALService interface
+// nolint: gocognit
func (w WALServiceImplementation) Restore(
    ctx context.Context,
    request *wal.WALRestoreRequest,
@@ -140,7 +149,7 @@ func (w WALServiceImplementation) Restore(

    barmanConfiguration := &objectStore.Spec.Configuration

-   env := getRestoreCABundleEnv(barmanConfiguration)
+   env := common.GetRestoreCABundleEnv(barmanConfiguration)
    credentialsEnv, err := barmanCredentials.EnvSetBackupCloudCredentials(
        ctx,
        w.Client,
@@ -151,9 +160,19 @@ func (w WALServiceImplementation) Restore(
    if err != nil {
        return nil, fmt.Errorf("while getting recover credentials: %w", err)
    }
-   env = mergeEnv(env, credentialsEnv)
+   env = common.MergeEnv(env, credentialsEnv)

-   options, err := barmanCommand.CloudWalRestoreOptions(ctx, barmanConfiguration, objectStore.Name)
+   // TODO: refactor this code elsewhere
+   serverName := cluster.Name
+   for _, plugin := range cluster.Spec.Plugins {
+       if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
+           if pluginServerName, ok := plugin.Parameters["serverName"]; ok {
+               serverName = pluginServerName
+           }
+       }
+   }
+
+   options, err := barmanCommand.CloudWalRestoreOptions(ctx, barmanConfiguration, serverName)
    if err != nil {
        return nil, fmt.Errorf("while getting barman-cloud-wal-restore options: %w", err)
    }
@@ -260,68 +279,6 @@ func (w WALServiceImplementation) SetFirstRequired(
    panic("implement me")
}

-// mergeEnv merges all the values inside incomingEnv into env.
-func mergeEnv(env []string, incomingEnv []string) []string {
-   result := make([]string, len(env), len(env)+len(incomingEnv))
-   copy(result, env)
-
-   for _, incomingItem := range incomingEnv {
-       incomingKV := strings.SplitAfterN(incomingItem, "=", 2)
-       if len(incomingKV) != 2 {
-           continue
-       }
-
-       found := false
-       for idx, item := range result {
-           if strings.HasPrefix(item, incomingKV[0]) {
-               result[idx] = incomingItem
-               found = true
-           }
-       }
-       if !found {
-           result = append(result, incomingItem)
-       }
-   }
-
-   return result
-}
-
-// TODO: refactor.
-const (
-   // ScratchDataDirectory is the directory to be used for scratch data.
-   ScratchDataDirectory = "/controller"
-
-   // CertificatesDir location to store the certificates.
-   CertificatesDir = ScratchDataDirectory + "/certificates/"
-
-   // BarmanBackupEndpointCACertificateLocation is the location where the barman endpoint
-   // CA certificate is stored.
-   BarmanBackupEndpointCACertificateLocation = CertificatesDir + BarmanBackupEndpointCACertificateFileName
-
-   // BarmanBackupEndpointCACertificateFileName is the name of the file in which the barman endpoint
-   // CA certificate for backups is stored.
-   BarmanBackupEndpointCACertificateFileName = "backup-" + BarmanEndpointCACertificateFileName
-
-   // BarmanRestoreEndpointCACertificateFileName is the name of the file in which the barman endpoint
-   // CA certificate for restores is stored.
-   BarmanRestoreEndpointCACertificateFileName = "restore-" + BarmanEndpointCACertificateFileName
-
-   // BarmanEndpointCACertificateFileName is the name of the file in which the barman endpoint
-   // CA certificate is stored.
-   BarmanEndpointCACertificateFileName = "barman-ca.crt"
-)
-
-func getRestoreCABundleEnv(configuration *barmanapi.BarmanObjectStoreConfiguration) []string {
-   var env []string
-
-   if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil {
-       env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", BarmanBackupEndpointCACertificateLocation))
-   } else if configuration.EndpointCA != nil && configuration.BarmanCredentials.Azure != nil {
-       env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", BarmanBackupEndpointCACertificateLocation))
-   }
-   return env
-}
-
// isStreamingAvailable checks if this pod can replicate via streaming connection.
func isStreamingAvailable(cluster *cnpgv1.Cluster, podName string) bool {
    if cluster == nil {
@@ -6,6 +6,13 @@ import "github.com/cloudnative-pg/cnpg-i/pkg/identity"
// Point-of-view
const PluginName = "barman-cloud.cloudnative-pg.io"

+const (
+   // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
+   // if present, requires the WAL archiver to check that the backup object
+   // store is empty.
+   CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
+)
+
// Data is the metadata of this plugin.
var Data = identity.GetPluginMetadataResponse{
    Name: PluginName,
@@ -4,7 +4,6 @@ import (
    "strings"

    cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-   "github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/common"

    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
@@ -49,23 +48,49 @@ type PluginConfiguration struct {
}

// NewFromCluster extracts the configuration from the cluster
-func NewFromCluster(cluster *cnpgv1.Cluster) (*PluginConfiguration, error) {
-   helper := common.NewPlugin(
+func NewFromCluster(cluster *cnpgv1.Cluster) *PluginConfiguration {
+   helper := NewPlugin(
        *cluster,
        metadata.PluginName,
    )

    result := &PluginConfiguration{
+       // used for the backup/archive
        BarmanObjectName: helper.Parameters["barmanObjectName"],
    }

+   return result
+}
+
+// ValidateBarmanObjectName checks if the barmanObjectName is set
+func (p *PluginConfiguration) ValidateBarmanObjectName() error {
    err := NewConfigurationError()
-   if len(result.BarmanObjectName) == 0 {
-       err = err.WithMessage("Missing barmanObjectName parameter")
+   if len(p.BarmanObjectName) != 0 {
+       return nil
    }

-   if err.IsEmpty() {
-       return result, nil
-   }
-   return result, err
+   return err.WithMessage("Missing barmanObjectName parameter")
+}
+
+// Plugin represents a plugin with its associated cluster and parameters.
+type Plugin struct {
+   Cluster *cnpgv1.Cluster
+   // Parameters are the configuration parameters of this plugin
+   Parameters  map[string]string
+   PluginIndex int
+}
+
+// NewPlugin creates a new Plugin instance for the given cluster and plugin name.
+func NewPlugin(cluster cnpgv1.Cluster, pluginName string) *Plugin {
+   result := &Plugin{Cluster: &cluster}
+
+   result.PluginIndex = -1
+   for idx, cfg := range result.Cluster.Spec.Plugins {
+       if cfg.Name == pluginName {
+           result.PluginIndex = idx
+           result.Parameters = cfg.Parameters
+       }
+   }
+
+   return result
}
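A minimal usage sketch of the new configuration helpers above (illustrative only, not part of this commit; it assumes a *cnpgv1.Cluster value and the config package import already used elsewhere in this change):

// resolveObjectStoreName is an illustrative sketch, not part of this commit.
func resolveObjectStoreName(cluster *cnpgv1.Cluster) (string, error) {
    cfg := config.NewFromCluster(cluster)
    if err := cfg.ValidateBarmanObjectName(); err != nil {
        // the cluster does not set the "barmanObjectName" plugin parameter
        return "", err
    }
    return cfg.BarmanObjectName, nil
}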
@@ -3,14 +3,20 @@ package operator
import (
    "context"
    "errors"
+   "fmt"

+   cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+   "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
    "github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
    "github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/object"
    "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
    "github.com/cloudnative-pg/machinery/pkg/log"
    "github.com/spf13/viper"
+   batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
+   "k8s.io/utils/ptr"

+   "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
)

@@ -38,6 +44,15 @@ func (impl LifecycleImplementation) GetCapabilities(
                },
            },
        },
+       {
+           Group: batchv1.GroupName,
+           Kind:  "Job",
+           OperationTypes: []*lifecycle.OperatorOperationType{
+               {
+                   Type: lifecycle.OperatorOperationType_TYPE_CREATE,
+               },
+           },
+       },
    },
}, nil
}
@@ -47,50 +62,123 @@ func (impl LifecycleImplementation) LifecycleHook(
    ctx context.Context,
    request *lifecycle.OperatorLifecycleRequest,
) (*lifecycle.OperatorLifecycleResponse, error) {
-   contextLogger := log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle")
+   contextLogger := log.FromContext(ctx).WithName("lifecycle")
+   contextLogger.Info("Lifecycle hook reconciliation start")
    operation := request.GetOperationType().GetType().Enum()
    if operation == nil {
        return nil, errors.New("no operation set")
    }

-   cluster, err := decoder.DecodeClusterJSON(request.GetClusterDefinition())
+   kind, err := object.GetKind(request.GetObjectDefinition())
    if err != nil {
        return nil, err
    }

+   var cluster cnpgv1.Cluster
+   if err := decoder.DecodeObject(
+       request.GetClusterDefinition(),
+       &cluster,
+       cnpgv1.GroupVersion.WithKind("Cluster"),
+   ); err != nil {
+       return nil, err
+   }
+
+   pluginConfiguration := config.NewFromCluster(&cluster)
+
+   switch kind {
+   case "Pod":
+       contextLogger.Info("Reconciling pod")
+       return reconcilePod(ctx, &cluster, request, pluginConfiguration)
+   case "Job":
+       contextLogger.Info("Reconciling job")
+       return reconcileJob(ctx, &cluster, request, pluginConfiguration)
+   default:
+       return nil, fmt.Errorf("unsupported kind: %s", kind)
+   }
+}
+
+func reconcileJob(
+   ctx context.Context,
+   cluster *cnpgv1.Cluster,
+   request *lifecycle.OperatorLifecycleRequest,
+   pluginConfiguration *config.PluginConfiguration,
+) (*lifecycle.OperatorLifecycleResponse, error) {
+   contextLogger := log.FromContext(ctx).WithName("lifecycle")
+   if pluginConfig := cluster.GetRecoverySourcePlugin(); pluginConfig == nil || pluginConfig.Name != metadata.PluginName {
+       contextLogger.Debug("cluster does not use the this plugin for recovery, skipping")
+       return nil, nil
+   }
+
+   var job batchv1.Job
+   if err := decoder.DecodeObject(
+       request.GetObjectDefinition(),
+       &job,
+       batchv1.SchemeGroupVersion.WithKind("Job"),
+   ); err != nil {
+       contextLogger.Error(err, "failed to decode job")
+       return nil, err
+   }
+
+   contextLogger = log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle").
+       WithValues("jobName", job.Name)
+   contextLogger.Debug("starting job reconciliation")
+
+   if job.Spec.Template.Labels[utils.JobRoleLabelName] != "full-recovery" {
+       contextLogger.Debug("job is not a recovery job, skipping")
+       return nil, nil
+   }
+
+   mutatedJob := job.DeepCopy()
+
+   if err := reconcilePodSpec(
+       pluginConfiguration,
+       cluster,
+       &mutatedJob.Spec.Template.Spec,
+       "full-recovery",
+       corev1.Container{
+           Args: []string{"restore"},
+       },
+   ); err != nil {
+       return nil, fmt.Errorf("while reconciling pod spec for job: %w", err)
+   }
+
+   patch, err := object.CreatePatch(mutatedJob, &job)
+   if err != nil {
+       return nil, err
+   }
+
+   contextLogger.Debug("generated patch", "content", string(patch))
+   return &lifecycle.OperatorLifecycleResponse{
+       JsonPatch: patch,
+   }, nil
+}
+
+func reconcilePod(
+   ctx context.Context,
+   cluster *cnpgv1.Cluster,
+   request *lifecycle.OperatorLifecycleRequest,
+   pluginConfiguration *config.PluginConfiguration,
+) (*lifecycle.OperatorLifecycleResponse, error) {
+   contextLogger := log.FromContext(ctx).WithName("lifecycle")
+   if err := pluginConfiguration.ValidateBarmanObjectName(); err != nil {
+       contextLogger.Info("no barman object name set, skipping pod sidecar injection")
+       return nil, nil
+   }
+
    pod, err := decoder.DecodePodJSON(request.GetObjectDefinition())
    if err != nil {
        return nil, err
    }

-   pluginConfiguration, err := config.NewFromCluster(cluster)
-   if err != nil {
-       return nil, err
-   }
+   contextLogger = log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle").
+       WithValues("podName", pod.Name)

    mutatedPod := pod.DeepCopy()
-   err = object.InjectPluginSidecar(mutatedPod, &corev1.Container{
-       Name:  "plugin-barman-cloud",
-       Image: viper.GetString("sidecar-image"),
-       Env: []corev1.EnvVar{
-           {
-               Name:  "BARMAN_OBJECT_NAME",
-               Value: pluginConfiguration.BarmanObjectName,
-           },
-           {
-               // TODO: should we really use this one?
-               // should we mount an emptyDir volume just for that?
-               Name:  "SPOOL_DIRECTORY",
-               Value: "/controller/wal-restore-spool",
-           },
-       },
-       Command: []string{
-           "/usr/local/bin/instance",
-       },
-   }, true)
-   if err != nil {
-       return nil, err
+
+   if err := reconcilePodSpec(pluginConfiguration, cluster, &mutatedPod.Spec, "postgres", corev1.Container{
+       Args: []string{"instance"},
+   }); err != nil {
+       return nil, fmt.Errorf("while reconciling pod spec for pod: %w", err)
    }

    patch, err := object.CreatePatch(mutatedPod, pod)
@@ -103,3 +191,165 @@ func (impl LifecycleImplementation) LifecycleHook(
        JsonPatch: patch,
    }, nil
}

+func reconcilePodSpec(
+   cfg *config.PluginConfiguration,
+   cluster *cnpgv1.Cluster,
+   spec *corev1.PodSpec,
+   mainContainerName string,
+   sidecarConfig corev1.Container,
+) error {
+   envs := []corev1.EnvVar{
+       {
+           Name:  "NAMESPACE",
+           Value: cluster.Namespace,
+       },
+       {
+           Name:  "CLUSTER_NAME",
+           Value: cluster.Name,
+       },
+       {
+           Name:  "BARMAN_OBJECT_NAME",
+           Value: cfg.BarmanObjectName,
+       },
+       {
+           // TODO: should we really use this one?
+           // should we mount an emptyDir volume just for that?
+           Name:  "SPOOL_DIRECTORY",
+           Value: "/controller/wal-restore-spool",
+       },
+   }
+
+   // fixed values
+   sidecarConfig.Name = "plugin-barman-cloud"
+   sidecarConfig.Image = viper.GetString("sidecar-image")
+   sidecarConfig.ImagePullPolicy = cluster.Spec.ImagePullPolicy
+
+   // merge the main container envs if they aren't already set
+   for _, container := range spec.Containers {
+       if container.Name == mainContainerName {
+           for _, env := range container.Env {
+               found := false
+               for _, existingEnv := range sidecarConfig.Env {
+                   if existingEnv.Name == env.Name {
+                       found = true
+                       break
+                   }
+               }
+               if !found {
+                   sidecarConfig.Env = append(sidecarConfig.Env, env)
+               }
+           }
+           break
+       }
+   }
+
+   // merge the default envs if they aren't already set
+   for _, env := range envs {
+       found := false
+       for _, existingEnv := range sidecarConfig.Env {
+           if existingEnv.Name == env.Name {
+               found = true
+               break
+           }
+       }
+       if !found {
+           sidecarConfig.Env = append(sidecarConfig.Env, env)
+       }
+   }
+
+   if err := InjectPluginSidecarPodSpec(spec, &sidecarConfig, mainContainerName, true); err != nil {
+       return err
+   }
+
+   return nil
+}
+
+// TODO: move to machinery once the logic is finalized
+
+// InjectPluginVolumePodSpec injects the plugin volume into a CNPG Pod spec.
+func InjectPluginVolumePodSpec(spec *corev1.PodSpec, mainContainerName string) {
+   const (
+       pluginVolumeName = "plugins"
+       pluginMountPath  = "/plugins"
+   )
+
+   foundPluginVolume := false
+   for i := range spec.Volumes {
+       if spec.Volumes[i].Name == pluginVolumeName {
+           foundPluginVolume = true
+       }
+   }
+
+   if foundPluginVolume {
+       return
+   }
+
+   spec.Volumes = append(spec.Volumes, corev1.Volume{
+       Name: pluginVolumeName,
+       VolumeSource: corev1.VolumeSource{
+           EmptyDir: &corev1.EmptyDirVolumeSource{},
+       },
+   })
+
+   for i := range spec.Containers {
+       if spec.Containers[i].Name == mainContainerName {
+           spec.Containers[i].VolumeMounts = append(
+               spec.Containers[i].VolumeMounts,
+               corev1.VolumeMount{
+                   Name:      pluginVolumeName,
+                   MountPath: pluginMountPath,
+               },
+           )
+       }
+   }
+}
+
+// InjectPluginSidecarPodSpec injects a plugin sidecar into a CNPG Pod spec.
+//
+// If the "injectMainContainerVolumes" flag is true, this will append all the volume
+// mounts that are used in the instance manager Pod to the passed sidecar
+// container, granting it superuser access to the PostgreSQL instance.
+func InjectPluginSidecarPodSpec(
+   spec *corev1.PodSpec,
+   sidecar *corev1.Container,
+   mainContainerName string,
+   injectMainContainerVolumes bool,
+) error {
+   sidecar = sidecar.DeepCopy()
+   InjectPluginVolumePodSpec(spec, mainContainerName)
+
+   var volumeMounts []corev1.VolumeMount
+   sidecarContainerFound := false
+   mainContainerFound := false
+   for i := range spec.Containers {
+       if spec.Containers[i].Name == mainContainerName {
+           volumeMounts = spec.Containers[i].VolumeMounts
+           mainContainerFound = true
+       }
+   }
+
+   if !mainContainerFound {
+       return errors.New("main container not found")
+   }
+
+   for i := range spec.InitContainers {
+       if spec.InitContainers[i].Name == sidecar.Name {
+           sidecarContainerFound = true
+       }
+   }
+
+   if sidecarContainerFound {
+       // The sidecar container was already added
+       return nil
+   }
+
+   // Do not modify the passed sidecar definition
+   if injectMainContainerVolumes {
+       sidecar.VolumeMounts = append(sidecar.VolumeMounts, volumeMounts...)
+   }
+   sidecar.RestartPolicy = ptr.To(corev1.ContainerRestartPolicyAlways)
+   spec.InitContainers = append(spec.InitContainers, *sidecar)
+
+   return nil
+}
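An illustrative sketch (not part of the commit) of how the sidecar injection above composes: starting from a bare PodSpec with a "postgres" container, InjectPluginSidecarPodSpec adds the plugins emptyDir volume, mirrors the main container's volume mounts onto the sidecar, and appends it as an always-restarting init container.

// sketchSidecarInjection is illustrative only, not part of this commit.
func sketchSidecarInjection() (*corev1.PodSpec, error) {
    spec := &corev1.PodSpec{Containers: []corev1.Container{{Name: "postgres"}}}
    sidecar := corev1.Container{Name: "plugin-barman-cloud"}
    if err := InjectPluginSidecarPodSpec(spec, &sidecar, "postgres", true); err != nil {
        return nil, err // the only error path is a missing main container
    }
    // spec.InitContainers now holds the sidecar with RestartPolicy Always, and both
    // containers mount the shared "plugins" emptyDir volume under /plugins.
    return spec, nil
}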
211 internal/cnpgi/operator/lifecycle_test.go Normal file
@@ -0,0 +1,211 @@
package operator

import (
    "encoding/json"

    cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
    "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("LifecycleImplementation", func() {
    var (
        lifecycleImpl       LifecycleImplementation
        pluginConfiguration *config.PluginConfiguration
        cluster             *cnpgv1.Cluster
        jobTypeMeta         = metav1.TypeMeta{
            Kind:       "Job",
            APIVersion: "batch/v1",
        }
        podTypeMeta = metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        }
    )

    BeforeEach(func() {
        pluginConfiguration = &config.PluginConfiguration{
            BarmanObjectName: "minio-store-dest",
        }
        cluster = &cnpgv1.Cluster{
            Spec: cnpgv1.ClusterSpec{
                Bootstrap: &cnpgv1.BootstrapConfiguration{
                    Recovery: &cnpgv1.BootstrapRecovery{
                        Source: "origin-server",
                    },
                },
                ExternalClusters: []cnpgv1.ExternalCluster{
                    {
                        Name: "origin-server",
                        PluginConfiguration: &cnpgv1.PluginConfiguration{
                            Name: "barman-cloud.cloudnative-pg.io",
                            Parameters: map[string]string{
                                "barmanObjectName": "minio-store-source",
                            },
                        },
                    },
                },
                Plugins: cnpgv1.PluginConfigurationList{
                    {
                        Name: "barman-cloud.cloudnative-pg.io",
                        Parameters: map[string]string{
                            "barmanObjectName": "minio-store-dest",
                        },
                    },
                },
            },
        }
    })

    Describe("GetCapabilities", func() {
        It("returns the correct capabilities", func(ctx SpecContext) {
            response, err := lifecycleImpl.GetCapabilities(ctx, &lifecycle.OperatorLifecycleCapabilitiesRequest{})
            Expect(err).NotTo(HaveOccurred())
            Expect(response).NotTo(BeNil())
            Expect(response.LifecycleCapabilities).To(HaveLen(2))
        })
    })

    Describe("LifecycleHook", func() {
        It("returns an error if object definition is invalid", func(ctx SpecContext) {
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: []byte("invalid-json"),
            }
            response, err := lifecycleImpl.LifecycleHook(ctx, request)
            Expect(err).To(HaveOccurred())
            Expect(response).To(BeNil())
        })
    })

    Describe("reconcileJob", func() {
        It("returns a patch for a valid recovery job", func(ctx SpecContext) {
            job := &batchv1.Job{
                TypeMeta: jobTypeMeta,
                ObjectMeta: metav1.ObjectMeta{
                    Name:   "test-job",
                    Labels: map[string]string{},
                },
                Spec: batchv1.JobSpec{Template: corev1.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{
                        Labels: map[string]string{
                            utils.JobRoleLabelName: "full-recovery",
                        },
                    },
                    Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "full-recovery"}}},
                }},
            }
            jobJSON, _ := json.Marshal(job)
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: jobJSON,
            }

            response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
            Expect(err).NotTo(HaveOccurred())
            Expect(response).NotTo(BeNil())
            Expect(response.JsonPatch).NotTo(BeEmpty())
        })

        It("skips non-recovery jobs", func(ctx SpecContext) {
            job := &batchv1.Job{
                TypeMeta: jobTypeMeta,
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-job",
                    Labels: map[string]string{
                        "job-role": "non-recovery",
                    },
                },
            }
            jobJSON, _ := json.Marshal(job)
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: jobJSON,
            }

            response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
            Expect(err).NotTo(HaveOccurred())
            Expect(response).To(BeNil())
        })

        It("returns an error for invalid job definition", func(ctx SpecContext) {
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: []byte("invalid-json"),
            }

            response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
            Expect(err).To(HaveOccurred())
            Expect(response).To(BeNil())
        })

        It("should not error out if backup object name is not set and the job isn't full recovery",
            func(ctx SpecContext) {
                job := &batchv1.Job{
                    TypeMeta: jobTypeMeta,
                    ObjectMeta: metav1.ObjectMeta{
                        Name:   "test-job",
                        Labels: map[string]string{},
                    },
                    Spec: batchv1.JobSpec{Template: corev1.PodTemplateSpec{
                        ObjectMeta: metav1.ObjectMeta{
                            Labels: map[string]string{
                                utils.JobRoleLabelName: "non-recovery",
                            },
                        },
                        Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "non-recovery"}}},
                    }},
                }
                jobJSON, _ := json.Marshal(job)
                request := &lifecycle.OperatorLifecycleRequest{
                    ObjectDefinition: jobJSON,
                }

                response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
                Expect(err).NotTo(HaveOccurred())
                Expect(response).To(BeNil())
            })
    })

    Describe("reconcilePod", func() {
        It("returns a patch for a valid pod", func(ctx SpecContext) {
            pod := &corev1.Pod{
                TypeMeta: podTypeMeta,
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-pod",
                },
                Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "postgres"}}},
            }
            podJSON, _ := json.Marshal(pod)
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: podJSON,
            }

            response, err := reconcilePod(ctx, cluster, request, pluginConfiguration)
            Expect(err).NotTo(HaveOccurred())
            Expect(response).NotTo(BeNil())
            Expect(response.JsonPatch).NotTo(BeEmpty())
            var patch []map[string]interface{}
            err = json.Unmarshal(response.JsonPatch, &patch)
            Expect(err).NotTo(HaveOccurred())
            Expect(patch).To(ContainElement(HaveKeyWithValue("op", "add")))
            Expect(patch).To(ContainElement(HaveKeyWithValue("path", "/spec/initContainers")))
            Expect(patch).To(ContainElement(
                HaveKey("value")))
        })

        It("returns an error for invalid pod definition", func(ctx SpecContext) {
            request := &lifecycle.OperatorLifecycleRequest{
                ObjectDefinition: []byte("invalid-json"),
            }

            response, err := reconcilePod(ctx, cluster, request, pluginConfiguration)
            Expect(err).To(HaveOccurred())
            Expect(response).To(BeNil())
        })
    })
})
@@ -7,14 +7,15 @@ import (
    "github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
    "github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/object"
    "github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
+   "github.com/cloudnative-pg/machinery/pkg/log"
    rbacv1 "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/api/equality"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
-   "sigs.k8s.io/controller-runtime/pkg/log"

    barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
+   "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/specs"
)
@@ -48,7 +49,7 @@ func (r ReconcilerImplementation) Pre(
    request *reconciler.ReconcilerHooksRequest,
) (*reconciler.ReconcilerHooksResult, error) {
    contextLogger := log.FromContext(ctx)
+   contextLogger.Info("Pre hook reconciliation start")
    reconciledKind, err := object.GetKind(request.GetResourceDefinition())
    if err != nil {
        return nil, err
@@ -59,45 +60,94 @@ func (r ReconcilerImplementation) Pre(
        }, nil
    }

-   cluster, err := decoder.DecodeClusterJSON(request.GetResourceDefinition())
-   if err != nil {
+   contextLogger.Debug("parsing cluster definition")
+   var cluster cnpgv1.Cluster
+   if err := decoder.DecodeObject(
+       request.GetResourceDefinition(),
+       &cluster,
+       cnpgv1.GroupVersion.WithKind("Cluster")); err != nil {
        return nil, err
    }

    contextLogger = contextLogger.WithValues("name", cluster.Name, "namespace", cluster.Namespace)
    ctx = log.IntoContext(ctx, contextLogger)

-   pluginConfiguration, err := config.NewFromCluster(cluster)
-   if err != nil {
-       return nil, err
+   pluginConfiguration := config.NewFromCluster(&cluster)
+
+   contextLogger.Debug("parsing barman object configuration")
+
+   var barmanObjects []barmancloudv1.ObjectStore
+
+   // this could be empty during recoveries
+   if pluginConfiguration.BarmanObjectName != "" {
+       var barmanObject barmancloudv1.ObjectStore
+       if err := r.Client.Get(ctx, client.ObjectKey{
+           Namespace: cluster.Namespace,
+           Name:      pluginConfiguration.BarmanObjectName,
+       }, &barmanObject); err != nil {
+           if apierrs.IsNotFound(err) {
+               contextLogger.Info("barman object configuration not found, requeuing")
+               return &reconciler.ReconcilerHooksResult{
+                   Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE,
+               }, nil
+           }
+
+           return nil, err
+       }
+
+       barmanObjects = append(barmanObjects, barmanObject)
    }

-   var barmanObject barmancloudv1.ObjectStore
-   if err := r.Client.Get(ctx, client.ObjectKey{
-       Namespace: cluster.Namespace,
-       Name:      pluginConfiguration.BarmanObjectName,
-   }, &barmanObject); err != nil {
+   if barmanObject, err := r.getRecoveryBarmanObject(ctx, &cluster); err != nil {
        if apierrs.IsNotFound(err) {
-           contextLogger.Info("Not found barman object configuration, requeuing")
+           contextLogger.Info("barman recovery object configuration not found, requeuing")
            return &reconciler.ReconcilerHooksResult{
                Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE,
            }, nil
        }
+   } else if barmanObject != nil {
+       barmanObjects = append(barmanObjects, *barmanObject)
    }

-   if err := r.ensureRole(ctx, cluster, &barmanObject); err != nil {
+   var additionalSecretNames []string
+   if err := r.ensureRole(ctx, &cluster, barmanObjects, additionalSecretNames); err != nil {
        return nil, err
    }

-   if err := r.ensureRoleBinding(ctx, cluster); err != nil {
+   if err := r.ensureRoleBinding(ctx, &cluster); err != nil {
        return nil, err
    }

+   contextLogger.Info("Pre hook reconciliation completed")
    return &reconciler.ReconcilerHooksResult{
        Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_CONTINUE,
    }, nil
}

+func (r ReconcilerImplementation) getRecoveryBarmanObject(
+   ctx context.Context,
+   cluster *cnpgv1.Cluster,
+) (*barmancloudv1.ObjectStore, error) {
+   recoveryConfig := cluster.GetRecoverySourcePlugin()
+   if recoveryConfig != nil && recoveryConfig.Name == metadata.PluginName {
+       // TODO: refactor -> cnpg-i-machinery should be able to help us on getting
+       // the configuration for a recovery plugin
+       if recoveryObjectStore, ok := recoveryConfig.Parameters["barmanObjectName"]; ok {
+           var barmanObject barmancloudv1.ObjectStore
+           if err := r.Client.Get(ctx, client.ObjectKey{
+               Namespace: cluster.Namespace,
+               Name:      recoveryObjectStore,
+           }, &barmanObject); err != nil {
+               return nil, err
+           }
+
+           return &barmanObject, nil
+       }
+   }
+
+   return nil, nil
+}
+
// Post implements the reconciler interface
func (r ReconcilerImplementation) Post(
    _ context.Context,
@@ -111,10 +161,11 @@ func (r ReconcilerImplementation) Post(
func (r ReconcilerImplementation) ensureRole(
    ctx context.Context,
    cluster *cnpgv1.Cluster,
-   barmanObject *barmancloudv1.ObjectStore,
+   barmanObjects []barmancloudv1.ObjectStore,
+   additionalSecretNames []string,
) error {
    contextLogger := log.FromContext(ctx)
-   newRole := specs.BuildRole(cluster, barmanObject)
+   newRole := specs.BuildRole(cluster, barmanObjects, additionalSecretNames)

    var role rbacv1.Role
    if err := r.Client.Get(ctx, client.ObjectKey{
@@ -4,6 +4,7 @@ import (
    "fmt"

    cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+   "github.com/cloudnative-pg/machinery/pkg/stringset"
    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -13,47 +14,62 @@ import (
// BuildRole builds the Role object for this cluster
func BuildRole(
    cluster *cnpgv1.Cluster,
-   barmanObject *barmancloudv1.ObjectStore,
+   barmanObjects []barmancloudv1.ObjectStore,
+   additionalSecretNames []string,
) *rbacv1.Role {
-   return &rbacv1.Role{
+   role := &rbacv1.Role{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: cluster.Namespace,
            Name:      GetRBACName(cluster.Name),
        },

-       Rules: []rbacv1.PolicyRule{
-           {
-               APIGroups: []string{
-                   "barmancloud.cnpg.io",
-               },
-               Verbs: []string{
-                   "get",
-                   "watch",
-                   "list",
-               },
-               Resources: []string{
-                   "objectstores",
-               },
-               ResourceNames: []string{
-                   barmanObject.Name,
-               },
-           },
-           {
-               APIGroups: []string{
-                   "",
-               },
-               Resources: []string{
-                   "secrets",
-               },
-               Verbs: []string{
-                   "get",
-                   "watch",
-                   "list",
-               },
-               ResourceNames: collectSecretNames(barmanObject),
-           },
-       },
+       Rules: []rbacv1.PolicyRule{},
    }

+   secretsSet := stringset.New()
+   for _, barmanObject := range barmanObjects {
+       role.Rules = append(role.Rules, rbacv1.PolicyRule{
+           APIGroups: []string{
+               "barmancloud.cnpg.io",
+           },
+           Verbs: []string{
+               "get",
+               "watch",
+               "list",
+           },
+           Resources: []string{
+               "objectstores",
+           },
+           ResourceNames: []string{
+               barmanObject.Name,
+           },
+       })
+
+       for _, secret := range CollectSecretNamesFromCredentials(&barmanObject.Spec.Configuration.BarmanCredentials) {
+           secretsSet.Put(secret)
+       }
+   }
+
+   for _, secret := range additionalSecretNames {
+       secretsSet.Put(secret)
+   }
+
+   role.Rules = append(role.Rules, rbacv1.PolicyRule{
+       APIGroups: []string{
+           "",
+       },
+       Resources: []string{
+           "secrets",
+       },
+       Verbs: []string{
+           "get",
+           "watch",
+           "list",
+       },
+       ResourceNames: secretsSet.ToSortedList(),
+   })
+
+   return role
}

// BuildRoleBinding builds the role binding object for this cluster
@@ -1,39 +1,35 @@
package specs

import (
+   barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
    machineryapi "github.com/cloudnative-pg/machinery/pkg/api"
-
-   barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)

-func collectSecretNames(object *barmancloudv1.ObjectStore) []string {
-   if object == nil {
-       return nil
-   }
-
+// CollectSecretNamesFromCredentials collects the names of the secrets
+func CollectSecretNamesFromCredentials(barmanCredentials *barmanapi.BarmanCredentials) []string {
    var references []*machineryapi.SecretKeySelector
-   if object.Spec.Configuration.AWS != nil {
+   if barmanCredentials.AWS != nil {
        references = append(
            references,
-           object.Spec.Configuration.AWS.AccessKeyIDReference,
-           object.Spec.Configuration.AWS.SecretAccessKeyReference,
-           object.Spec.Configuration.AWS.RegionReference,
-           object.Spec.Configuration.AWS.SessionToken,
+           barmanCredentials.AWS.AccessKeyIDReference,
+           barmanCredentials.AWS.SecretAccessKeyReference,
+           barmanCredentials.AWS.RegionReference,
+           barmanCredentials.AWS.SessionToken,
        )
    }
-   if object.Spec.Configuration.Azure != nil {
+   if barmanCredentials.Azure != nil {
        references = append(
            references,
-           object.Spec.Configuration.Azure.ConnectionString,
-           object.Spec.Configuration.Azure.StorageAccount,
-           object.Spec.Configuration.Azure.StorageKey,
-           object.Spec.Configuration.Azure.StorageSasToken,
+           barmanCredentials.Azure.ConnectionString,
+           barmanCredentials.Azure.StorageAccount,
+           barmanCredentials.Azure.StorageKey,
+           barmanCredentials.Azure.StorageSasToken,
        )
    }
-   if object.Spec.Configuration.Google != nil {
+   if barmanCredentials.Google != nil {
        references = append(
            references,
-           object.Spec.Configuration.Google.ApplicationCredentials,
+           barmanCredentials.Google.ApplicationCredentials,
        )
    }

13 internal/cnpgi/operator/suite_test.go Normal file
@@ -0,0 +1,13 @@
package operator

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestOperator(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Operator Suite")
}
internal/cnpgi/restore/doc.go
Normal file
2
internal/cnpgi/restore/doc.go
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
// Package restore provides the restore functionality for CNPGI.
|
||||||
|
package restore
|
||||||
53 internal/cnpgi/restore/identity.go Normal file
@@ -0,0 +1,53 @@
package restore

import (
    "context"

    "github.com/cloudnative-pg/cnpg-i/pkg/identity"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)

// IdentityImplementation implements IdentityServer
type IdentityImplementation struct {
    identity.UnimplementedIdentityServer
    BarmanObjectKey client.ObjectKey
    Client          client.Client
}

// GetPluginMetadata implements IdentityServer
func (i IdentityImplementation) GetPluginMetadata(
    _ context.Context,
    _ *identity.GetPluginMetadataRequest,
) (*identity.GetPluginMetadataResponse, error) {
    return &metadata.Data, nil
}

// GetPluginCapabilities implements IdentityServer
func (i IdentityImplementation) GetPluginCapabilities(
    _ context.Context,
    _ *identity.GetPluginCapabilitiesRequest,
) (*identity.GetPluginCapabilitiesResponse, error) {
    return &identity.GetPluginCapabilitiesResponse{
        Capabilities: []*identity.PluginCapability{
            {
                Type: &identity.PluginCapability_Service_{
                    Service: &identity.PluginCapability_Service{
                        Type: identity.PluginCapability_Service_TYPE_RESTORE_JOB,
                    },
                },
            },
        },
    }, nil
}

// Probe implements IdentityServer
func (i IdentityImplementation) Probe(
    _ context.Context,
    _ *identity.ProbeRequest,
) (*identity.ProbeResponse, error) {
    return &identity.ProbeResponse{
        Ready: true,
    }, nil
}
85 internal/cnpgi/restore/manager.go Normal file
@@ -0,0 +1,85 @@
package restore

import (
    "context"
    "os"

    cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    "github.com/spf13/viper"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/cache"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)

var scheme = runtime.NewScheme()

func init() {
    utilruntime.Must(barmancloudv1.AddToScheme(scheme))
    utilruntime.Must(cnpgv1.AddToScheme(scheme))
    utilruntime.Must(clientgoscheme.AddToScheme(scheme))
}

// Start starts the sidecar informers and CNPG-i server
func Start(ctx context.Context) error {
    setupLog := log.FromContext(ctx)
    setupLog.Info("Starting barman cloud instance plugin")
    namespace := viper.GetString("namespace")
    clusterName := viper.GetString("cluster-name")

    objs := map[client.Object]cache.ByObject{
        &cnpgv1.Cluster{}: {
            Field: fields.OneTermEqualSelector("metadata.name", clusterName),
            Namespaces: map[string]cache.Config{
                namespace: {},
            },
        },
    }

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme: scheme,
        Cache: cache.Options{
            ByObject: objs,
        },
        Client: client.Options{
            Cache: &client.CacheOptions{
                DisableFor: []client.Object{
                    &corev1.Secret{},
                    &barmancloudv1.ObjectStore{},
                },
            },
        },
    })
    if err != nil {
        setupLog.Error(err, "unable to start manager")
        os.Exit(1)
    }

    if err := mgr.Add(&CNPGI{
        PluginPath:     viper.GetString("plugin-path"),
        SpoolDirectory: viper.GetString("spool-directory"),
        ClusterObjectKey: client.ObjectKey{
            Namespace: namespace,
            Name:      clusterName,
        },
        Client:     mgr.GetClient(),
        PGDataPath: viper.GetString("pgdata"),
    }); err != nil {
        setupLog.Error(err, "unable to create CNPGI runnable")
        return err
    }

    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        setupLog.Error(err, "problem running manager")
        return err
    }

    return nil
}
459
internal/cnpgi/restore/restore.go
Normal file
459
internal/cnpgi/restore/restore.go
Normal file
@ -0,0 +1,459 @@
package restore

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path"
	"strings"

	"github.com/cloudnative-pg/barman-cloud/pkg/api"
	barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
	barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities"
	barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog"
	barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
	barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
	barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer"
	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
	"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
	restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
	"github.com/cloudnative-pg/machinery/pkg/execlog"
	"github.com/cloudnative-pg/machinery/pkg/fileutils"
	"github.com/cloudnative-pg/machinery/pkg/log"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
	"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)

const (
	// ScratchDataDirectory is the directory to be used for scratch data
	ScratchDataDirectory = "/controller"

	// RecoveryTemporaryDirectory provides a path to store temporary files
	// needed in the recovery process
	RecoveryTemporaryDirectory = ScratchDataDirectory + "/recovery"
)

// JobHookImpl is the implementation of the restore job hooks
type JobHookImpl struct {
	restore.UnimplementedRestoreJobHooksServer
	Client               client.Client
	ClusterObjectKey     client.ObjectKey
	BackupToRestore      client.ObjectKey
	SpoolDirectory       string
	PgDataPath           string
	PgWalFolderToSymlink string
}

// GetCapabilities returns the capabilities of the restore job hooks
func (impl JobHookImpl) GetCapabilities(
	_ context.Context,
	_ *restore.RestoreJobHooksCapabilitiesRequest,
) (*restore.RestoreJobHooksCapabilitiesResult, error) {
	return &restore.RestoreJobHooksCapabilitiesResult{
		Capabilities: []*restore.RestoreJobHooksCapability{
			{
				Kind: restore.RestoreJobHooksCapability_KIND_RESTORE,
			},
		},
	}, nil
}

// Restore restores the cluster from a backup
func (impl JobHookImpl) Restore(
	ctx context.Context,
	req *restore.RestoreRequest,
) (*restore.RestoreResponse, error) {
	contextLogger := log.FromContext(ctx)
	var cluster cnpgv1.Cluster
	if err := decoder.DecodeObject(
		req.GetClusterDefinition(),
		&cluster,
		cnpgv1.GroupVersion.WithKind("Cluster"),
	); err != nil {
		return nil, err
	}

	recoveryPluginConfiguration := cluster.GetRecoverySourcePlugin()

	var recoveryObjectStore barmancloudv1.ObjectStore
	if err := impl.Client.Get(ctx, types.NamespacedName{
		Namespace: cluster.Namespace,
		// TODO: refactor -> cnpg-i-machinery should be able to help us on getting
		// the configuration for a recovery plugin
		Name: recoveryPluginConfiguration.Parameters["barmanObjectName"],
	}, &recoveryObjectStore); err != nil {
		return nil, err
	}

	var targetObjectStoreName types.NamespacedName
	for _, plugin := range cluster.Spec.Plugins {
		if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
			targetObjectStoreName = types.NamespacedName{
				Namespace: cluster.Namespace,
				Name:      plugin.Parameters["barmanObjectName"],
			}
		}
	}

	var targetObjectStore barmancloudv1.ObjectStore
	if targetObjectStoreName.Name != "" {
		if err := impl.Client.Get(ctx, targetObjectStoreName, &targetObjectStore); err != nil {
			return nil, err
		}
	}

	// Before starting the restore we check if the archive destination is safe to use,
	// otherwise we stop creating the cluster
	if targetObjectStoreName.Name != "" {
		if err := impl.checkBackupDestination(ctx, &cluster, &targetObjectStore.Spec.Configuration); err != nil {
			return nil, err
		}
	}

	// Detect the backup to recover
	backup, env, err := loadBackupObjectFromExternalCluster(
		ctx,
		impl.Client,
		&cluster,
		&recoveryObjectStore.Spec.Configuration,
	)
	if err != nil {
		return nil, err
	}

	if err := impl.ensureArchiveContainsLastCheckpointRedoWAL(
		ctx,
		env,
		backup,
		&recoveryObjectStore.Spec.Configuration,
	); err != nil {
		return nil, err
	}

	if err := impl.restoreDataDir(
		ctx,
		backup,
		env,
		&recoveryObjectStore.Spec.Configuration,
	); err != nil {
		return nil, err
	}

	if cluster.Spec.WalStorage != nil {
		if _, err := impl.restoreCustomWalDir(ctx); err != nil {
			return nil, err
		}
	}

	config, err := getRestoreWalConfig(ctx, backup, &recoveryObjectStore.Spec.Configuration)
	if err != nil {
		return nil, err
	}

	contextLogger.Info("sending restore response", "config", config, "env", env)
	return &restore.RestoreResponse{
		RestoreConfig: config,
		Envs:          env,
	}, nil
}

// restoreDataDir restores PGDATA from an existing backup
func (impl JobHookImpl) restoreDataDir(
	ctx context.Context,
	backup *cnpgv1.Backup,
	env []string,
	barmanConfiguration *cnpgv1.BarmanObjectStoreConfiguration,
) error {
	var options []string

	if backup.Status.EndpointURL != "" {
		options = append(options, "--endpoint-url", backup.Status.EndpointURL)
	}
	options = append(options, backup.Status.DestinationPath)
	options = append(options, backup.Status.ServerName)
	options = append(options, backup.Status.BackupID)

	options, err := barmanCommand.AppendCloudProviderOptionsFromConfiguration(ctx, options, barmanConfiguration)
	if err != nil {
		return err
	}

	options = append(options, impl.PgDataPath)

	log.Info("Starting barman-cloud-restore",
		"options", options)

	cmd := exec.Command(barmanCapabilities.BarmanCloudRestore, options...) // #nosec G204
	cmd.Env = env
	err = execlog.RunStreaming(cmd, barmanCapabilities.BarmanCloudRestore)
	if err != nil {
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(ctx, exitError.ExitCode())
		}

		log.Error(err, "Can't restore backup")
		return err
	}
	log.Info("Restore completed")
	return nil
}
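
// A sketch of the invocation assembled above, assuming an S3 destination behind a
// MinIO endpoint (all values below are illustrative, not produced by this code
// verbatim):
//
//	barman-cloud-restore --endpoint-url https://minio.local s3://backups/ \
//	    cluster-example 20240101T120000 [provider flags] /var/lib/postgresql/data/pgdata
//
// i.e. the optional endpoint flag, the destination path, the server name, the backup
// ID, any cloud-provider options, and finally the target PGDATA directory.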

func (impl JobHookImpl) ensureArchiveContainsLastCheckpointRedoWAL(
	ctx context.Context,
	env []string,
	backup *cnpgv1.Backup,
	barmanConfiguration *cnpgv1.BarmanObjectStoreConfiguration,
) error {
	// it's the full path of the file that will temporarily contain the LastCheckpointRedoWAL
	const testWALPath = RecoveryTemporaryDirectory + "/test.wal"
	contextLogger := log.FromContext(ctx)

	defer func() {
		if err := fileutils.RemoveFile(testWALPath); err != nil {
			contextLogger.Error(err, "while deleting the temporary wal file: %w")
		}
	}()

	if err := fileutils.EnsureParentDirectoryExists(testWALPath); err != nil {
		return err
	}

	rest, err := barmanRestorer.New(ctx, env, impl.SpoolDirectory)
	if err != nil {
		return err
	}

	opts, err := barmanCommand.CloudWalRestoreOptions(ctx, barmanConfiguration, backup.Status.ServerName)
	if err != nil {
		return err
	}

	if err := rest.Restore(backup.Status.BeginWal, testWALPath, opts); err != nil {
		return fmt.Errorf("encountered an error while checking the presence of first needed WAL in the archive: %w", err)
	}

	return nil
}

func (impl *JobHookImpl) checkBackupDestination(
	ctx context.Context,
	cluster *cnpgv1.Cluster,
	barmanConfiguration *cnpgv1.BarmanObjectStoreConfiguration,
) error {
	// Get environment from cache
	env, err := barmanCredentials.EnvSetRestoreCloudCredentials(ctx,
		impl.Client,
		cluster.Namespace,
		barmanConfiguration,
		os.Environ())
	if err != nil {
		return fmt.Errorf("can't get credentials for cluster %v: %w", cluster.Name, err)
	}
	if len(env) == 0 {
		return nil
	}

	// Instantiate the WALArchiver to get the proper configuration
	var walArchiver *barmanArchiver.WALArchiver
	walArchiver, err = barmanArchiver.New(
		ctx,
		env,
		impl.SpoolDirectory,
		impl.PgDataPath,
		path.Join(impl.PgDataPath, metadata.CheckEmptyWalArchiveFile))
	if err != nil {
		return fmt.Errorf("while creating the archiver: %w", err)
	}

	// TODO: refactor this code elsewhere
	serverName := cluster.Name
	for _, plugin := range cluster.Spec.Plugins {
		if plugin.IsEnabled() && plugin.Name == metadata.PluginName {
			if pluginServerName, ok := plugin.Parameters["serverName"]; ok {
				serverName = pluginServerName
			}
		}
	}

	// Get WAL archive options
	checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(
		ctx, barmanConfiguration, serverName)
	if err != nil {
		log.Error(err, "while getting barman-cloud-wal-archive options")
		return err
	}

	// Check if we're ok to archive in the desired destination
	if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) {
		return walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions)
	}

	return nil
}

// restoreCustomWalDir moves the current pg_wal data to the specified custom wal dir and applies the symlink
// returns indicating if any changes were made and any error encountered in the process
func (impl JobHookImpl) restoreCustomWalDir(ctx context.Context) (bool, error) {
	const pgWalDirectory = "pg_wal"

	contextLogger := log.FromContext(ctx)
	pgDataWal := path.Join(impl.PgDataPath, pgWalDirectory)

	// if the link is already present we have nothing to do.
	if linkInfo, _ := os.Readlink(pgDataWal); linkInfo == impl.PgWalFolderToSymlink {
		contextLogger.Info("symlink to the WAL volume already present, skipping the custom wal dir restore")
		return false, nil
	}

	if err := fileutils.EnsureDirectoryExists(impl.PgWalFolderToSymlink); err != nil {
		return false, err
	}

	contextLogger.Info("restoring WAL volume symlink and transferring data")
	if err := fileutils.EnsureDirectoryExists(pgDataWal); err != nil {
		return false, err
	}

	if err := fileutils.MoveDirectoryContent(pgDataWal, impl.PgWalFolderToSymlink); err != nil {
		return false, err
	}

	if err := fileutils.RemoveFile(pgDataWal); err != nil {
		return false, err
	}

	return true, os.Symlink(impl.PgWalFolderToSymlink, pgDataWal)
}

// getRestoreWalConfig obtains the content to append to `custom.conf` allowing PostgreSQL
// to complete the WAL recovery from the object storage and then start
// as a new primary
func getRestoreWalConfig(
	ctx context.Context,
	backup *cnpgv1.Backup,
	barmanConfiguration *cnpgv1.BarmanObjectStoreConfiguration,
) (string, error) {
	var err error

	cmd := []string{barmanCapabilities.BarmanCloudWalRestore}
	if backup.Status.EndpointURL != "" {
		cmd = append(cmd, "--endpoint-url", backup.Status.EndpointURL)
	}
	cmd = append(cmd, backup.Status.DestinationPath)
	cmd = append(cmd, backup.Status.ServerName)

	cmd, err = barmanCommand.AppendCloudProviderOptionsFromConfiguration(ctx, cmd, barmanConfiguration)
	if err != nil {
		return "", err
	}

	cmd = append(cmd, "%f", "%p")

	recoveryFileContents := fmt.Sprintf(
		"recovery_target_action = promote\n"+
			"restore_command = '%s'\n",
		strings.Join(cmd, " "))

	return recoveryFileContents, nil
}
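
// As a sketch, assuming an S3 destination s3://backups/ and a server named
// cluster-example (both illustrative values), the snippet returned above and appended
// to custom.conf would resemble:
//
//	recovery_target_action = promote
//	restore_command = 'barman-cloud-wal-restore s3://backups/ cluster-example %f %p'
//
// with the endpoint flag and any provider-specific options added when configured.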

// loadBackupObjectFromExternalCluster generates an in-memory Backup structure given a reference to
// an external cluster, loading the required information from the object store
func loadBackupObjectFromExternalCluster(
	ctx context.Context,
	typedClient client.Client,
	cluster *cnpgv1.Cluster,
	recoveryObjectStore *api.BarmanObjectStoreConfiguration,
) (*cnpgv1.Backup, []string, error) {
	contextLogger := log.FromContext(ctx)
	sourceName := cluster.Spec.Bootstrap.Recovery.Source

	if sourceName == "" {
		return nil, nil, fmt.Errorf("recovery source not specified")
	}

	server, found := cluster.ExternalCluster(sourceName)
	if !found {
		return nil, nil, fmt.Errorf("missing external cluster: %v", sourceName)
	}

	// TODO: document this, should this be in the helper?
	var serverName string
	if pluginServerName, ok := server.PluginConfiguration.Parameters["serverName"]; ok {
		serverName = pluginServerName
	} else {
		serverName = server.Name
	}

	contextLogger.Info("Recovering from external cluster",
		"sourceName", sourceName,
		"serverName", serverName)

	env, err := barmanCredentials.EnvSetRestoreCloudCredentials(
		ctx,
		typedClient,
		cluster.Namespace,
		recoveryObjectStore,
		os.Environ())
	if err != nil {
		return nil, nil, err
	}

	backupCatalog, err := barmanCommand.GetBackupList(ctx, recoveryObjectStore, serverName, env)
	if err != nil {
		return nil, nil, err
	}

	// We are now choosing the right backup to restore
	var targetBackup *barmanCatalog.BarmanBackup
	if cluster.Spec.Bootstrap.Recovery != nil &&
		cluster.Spec.Bootstrap.Recovery.RecoveryTarget != nil {
		targetBackup, err = backupCatalog.FindBackupInfo(
			cluster.Spec.Bootstrap.Recovery.RecoveryTarget,
		)
		if err != nil {
			return nil, nil, err
		}
	} else {
		targetBackup = backupCatalog.LatestBackupInfo()
	}
	if targetBackup == nil {
		return nil, nil, fmt.Errorf("no target backup found")
	}

	contextLogger.Info("Target backup found", "backup", targetBackup)

	return &cnpgv1.Backup{
		Spec: cnpgv1.BackupSpec{
			Cluster: cnpgv1.LocalObjectReference{
				Name: serverName,
			},
		},
		Status: cnpgv1.BackupStatus{
			BarmanCredentials: recoveryObjectStore.BarmanCredentials,
			EndpointCA:        recoveryObjectStore.EndpointCA,
			EndpointURL:       recoveryObjectStore.EndpointURL,
			DestinationPath:   recoveryObjectStore.DestinationPath,
			ServerName:        serverName,
			BackupID:          targetBackup.ID,
			Phase:             cnpgv1.BackupPhaseCompleted,
			StartedAt:         &metav1.Time{Time: targetBackup.BeginTime},
			StoppedAt:         &metav1.Time{Time: targetBackup.EndTime},
			BeginWal:          targetBackup.BeginWal,
			EndWal:            targetBackup.EndWal,
			BeginLSN:          targetBackup.BeginLSN,
			EndLSN:            targetBackup.EndLSN,
			Error:             targetBackup.Error,
			CommandOutput:     "",
			CommandError:      "",
		},
	}, env, nil
}
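
// Illustrative only: this function expects the recovery source to be declared through
// the plugin stanza of an external cluster. Assuming the plugin is registered as
// "barman-cloud.cloudnative-pg.io" and the object store is called "backup-store"
// (both assumptions, not defined in this file), the relevant Cluster stanzas would
// resemble:
//
//	bootstrap:
//	  recovery:
//	    source: origin
//	externalClusters:
//	  - name: origin
//	    plugin:
//	      name: barman-cloud.cloudnative-pg.io
//	      parameters:
//	        barmanObjectName: backup-store
//	        serverName: cluster-example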
44
internal/cnpgi/restore/start.go
Normal file
@ -0,0 +1,44 @@
package restore

import (
	"context"

	"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/http"
	restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
	"google.golang.org/grpc"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// CNPGI is the implementation of the PostgreSQL sidecar
type CNPGI struct {
	PluginPath       string
	SpoolDirectory   string
	ClusterObjectKey client.ObjectKey
	Client           client.Client
	PGDataPath       string
}

// Start starts the GRPC service
func (c *CNPGI) Start(ctx context.Context) error {
	// PgWalVolumePgWalPath is the path of pg_wal directory inside the WAL volume when present
	const PgWalVolumePgWalPath = "/var/lib/postgresql/wal/pg_wal"

	enrich := func(server *grpc.Server) error {
		restore.RegisterRestoreJobHooksServer(server, &JobHookImpl{
			Client:               c.Client,
			ClusterObjectKey:     c.ClusterObjectKey,
			SpoolDirectory:       c.SpoolDirectory,
			PgDataPath:           c.PGDataPath,
			PgWalFolderToSymlink: PgWalVolumePgWalPath,
		})
		return nil
	}

	srv := http.Server{
		IdentityImpl: IdentityImplementation{},
		Enrichers:    []http.ServerEnricher{enrich},
		PluginPath:   c.PluginPath,
	}

	return srv.Start(ctx)
}
@ -37,6 +37,7 @@ type ObjectStoreReconciler struct {
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;patch;update;get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;patch;update;get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;list;get;watch;delete
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=backups,verbs=get;list;watch
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores/finalizers,verbs=update
@ -18,6 +18,7 @@ spec:
      serviceAccountName: plugin-barman-cloud
      containers:
      - image: plugin-barman-cloud:latest
        imagePullPolicy: IfNotPresent
        name: barman-cloud
        ports:
        - containerPort: 9090
@ -29,11 +30,13 @@ spec:
              key: SIDECAR_IMAGE
              name: plugin-barman-cloud
        args:
        - operator
        - --server-cert=/server/tls.crt
        - --server-key=/server/tls.key
        - --client-cert=/client/tls.crt
        - --server-address=:9090
        - --leader-elect
        - --log-level=debug
        volumeMounts:
        - mountPath: /server
          name: server
@ -11,9 +11,9 @@ resources:
- ../config/rbac
images:
- name: plugin-barman-cloud
  newName: kind.local/github.com/cloudnative-pg/plugin-barman-cloud/cmd/operator
  # result of kind load docker-image
  newTag: 7e901b38eaf33b047dcf2eb044c9c8ca85535d8041a3144d25f7e1a4690ea071
  newName: docker.io/library/plugin-barman-cloud
secretGenerator:
- literals:
  - SIDECAR_IMAGE=kind.local/github.com/cloudnative-pg/plugin-barman-cloud/cmd/instance:ca1fd58413940a247bc52cdb44f4a6909192d781b1767dc7ee9625368ee9d7e2
  - SIDECAR_IMAGE=docker.io/library/plugin-barman-cloud
  name: plugin-barman-cloud
@ -13,8 +13,8 @@ MYTMPDIR="$(mktemp -d)"
trap 'rm -rf -- "$MYTMPDIR"' EXIT

current_context=$(kubectl config view --raw -o json | jq -r '."current-context"' | sed "s/kind-//")
operator_image=$(KIND_CLUSTER_NAME="$current_context" KO_DOCKER_REPO=kind.local ko build -BP ./cmd/operator)
operator_image=$(KIND_CLUSTER_NAME="$current_context" KO_DOCKER_REPO=kind.local ko build -BP ./cmd/manager)
instance_image=$(KIND_CLUSTER_NAME="$current_context" KO_DOCKER_REPO=kind.local KO_DEFAULTBASEIMAGE="ghcr.io/cloudnative-pg/postgresql:17.0" ko build -BP ./cmd/instance)
instance_image=$(KIND_CLUSTER_NAME="$current_context" KO_DOCKER_REPO=kind.local KO_DEFAULTBASEIMAGE="ghcr.io/cloudnative-pg/postgresql:17.0" ko build -BP ./cmd/manager)

# Now we deploy the plugin inside the `cnpg-system` workspace
(