feat(spike): restore

Signed-off-by: Armando Ruocco <armando.ruocco@enterprisedb.com>
Armando Ruocco 2024-10-09 15:26:15 +02:00 committed by Leonardo Cecchi
parent 5d0038ecc5
commit 44ae212756
32 changed files with 1539 additions and 283 deletions

View File

@ -1,5 +1,5 @@
# Image URL to use for all building/pushing image targets
IMG ?= controller:latest
IMG ?= plugin-barman-cloud:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.31.0

cmd/manager/doc.go Normal file
View File

@ -0,0 +1,2 @@
// Package main is the entrypoint for the plugin
package main

cmd/manager/main.go Normal file
View File

@ -0,0 +1,38 @@
package main
import (
"fmt"
"os"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/instance"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/operator"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cmd/restore"
)
func main() {
cobra.EnableTraverseRunHooks = true
logFlags := &log.Flags{}
rootCmd := &cobra.Command{
Use: "manager [cmd]",
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
return nil
},
}
logFlags.AddFlags(rootCmd.PersistentFlags())
rootCmd.AddCommand(instance.NewCmd())
rootCmd.AddCommand(operator.NewCmd())
rootCmd.AddCommand(restore.NewCmd())
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
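
The three former standalone binaries are now subcommands of a single manager binary. The cobra.EnableTraverseRunHooks switch is what lets the root command's PersistentPreRunE configure logging even when a subcommand defines hooks of its own. A minimal standalone sketch of that behavior (the demo commands are hypothetical, not part of this commit):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// With EnableTraverseRunHooks, cobra runs the root's
	// PersistentPreRunE and then the child's own; without it, the
	// child's hook would shadow the root's.
	cobra.EnableTraverseRunHooks = true
	root := &cobra.Command{
		Use: "root",
		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("root hook: logging configured")
			return nil
		},
	}
	child := &cobra.Command{
		Use: "child",
		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("child hook")
			return nil
		},
		RunE: func(_ *cobra.Command, _ []string) error { return nil },
	}
	root.AddCommand(child)
	root.SetArgs([]string{"child"})
	_ = root.Execute()
}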

View File

@ -1,98 +0,0 @@
// Package main is the entrypoint of operator plugin
package main
import (
"fmt"
"os"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator"
)
func main() {
cobra.EnableTraverseRunHooks = true
logFlags := &log.Flags{}
rootCmd := &cobra.Command{
Use: "plugin-barman-cloud",
Short: "Starts the BarmanObjectStore reconciler and the Barman Cloud CNPG-i plugin",
RunE: func(cmd *cobra.Command, _ []string) error {
if len(viper.GetString("sidecar-image")) == 0 {
return fmt.Errorf("missing required SIDECAR_IMAGE environment variable")
}
return operator.Start(cmd.Context())
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
return nil
},
}
logFlags.AddFlags(rootCmd.PersistentFlags())
rootCmd.Flags().String("metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
_ = viper.BindPFlag("metrics-bind-address", rootCmd.Flags().Lookup("metrics-bind-address"))
rootCmd.Flags().String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
_ = viper.BindPFlag("health-probe-bind-address", rootCmd.Flags().Lookup("health-probe-bind-address"))
rootCmd.Flags().Bool("leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
_ = viper.BindPFlag("leader-elect", rootCmd.Flags().Lookup("leader-elect"))
rootCmd.Flags().Bool("metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
_ = viper.BindPFlag("metrics-secure", rootCmd.Flags().Lookup("metrics-secure"))
rootCmd.Flags().Bool("enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
_ = viper.BindPFlag("enable-http2", rootCmd.Flags().Lookup("enable-http2"))
rootCmd.Flags().String(
"plugin-path",
"",
"The plugins socket path",
)
_ = viper.BindPFlag("plugin-path", rootCmd.Flags().Lookup("plugin-path"))
rootCmd.Flags().String(
"server-cert",
"",
"The public key to be used for the server process",
)
_ = viper.BindPFlag("server-cert", rootCmd.Flags().Lookup("server-cert"))
rootCmd.Flags().String(
"server-key",
"",
"The key to be used for the server process",
)
_ = viper.BindPFlag("server-key", rootCmd.Flags().Lookup("server-key"))
rootCmd.Flags().String(
"client-cert",
"",
"The client public key to verify the connection",
)
_ = viper.BindPFlag("client-cert", rootCmd.Flags().Lookup("client-cert"))
rootCmd.Flags().String(
"server-address",
"",
"The address where to listen (i.e. 0:9090)",
)
_ = viper.BindPFlag("server-address", rootCmd.Flags().Lookup("server-address"))
_ = viper.BindEnv("sidecar-image", "SIDECAR_IMAGE")
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}

View File

@ -40,6 +40,14 @@ rules:
- get
- patch
- update
- apiGroups:
- postgresql.cnpg.io
resources:
- backups
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:

View File

@ -12,9 +12,9 @@ COPY ../go.sum go.sum
RUN go mod download
# Copy the go source
COPY ../cmd/operator/main.go cmd/operator/main.go
COPY ../api api/
COPY ../internal internal/
COPY ../cmd/manager/main.go cmd/manager/main.go
COPY ../api/ api/
COPY ../internal/ internal/
# Build
# the GOARCH has no default value, to allow the binary to be built according to the host where the command
@ -22,7 +22,7 @@ COPY ../internal internal/
# the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64. Therefore,
# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \
CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/operator/main.go
CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/manager/main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
@ -45,4 +45,4 @@ WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]
ENTRYPOINT ["/manager"]

View File

@ -4,7 +4,7 @@ metadata:
name: cluster-example
spec:
instances: 3
imagePullPolicy: IfNotPresent
plugins:
- name: barman-cloud.cloudnative-pg.io
parameters:

View File

@ -0,0 +1,21 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cluster-restore
spec:
instances: 3
imagePullPolicy: IfNotPresent
bootstrap:
recovery:
backup:
name: backup-example
usePlugin: true
plugins:
- name: barman-cloud.cloudnative-pg.io
# parameters:
# barmanObjectName: minio-store
storage:
size: 1Gi

go.mod
View File

@ -1,13 +1,15 @@
module github.com/cloudnative-pg/plugin-barman-cloud
go 1.22.0
go 1.23
toolchain go1.23.1
require (
github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542
github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6
github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241017151552-20297270038b
github.com/cloudnative-pg/cnpg-i v0.0.0-20241018104538-7f169c24d7a8
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241017094517-76870f5660ef
github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72
github.com/onsi/ginkgo/v2 v2.20.2
github.com/onsi/gomega v1.34.2
github.com/spf13/cobra v1.8.1
@ -16,6 +18,7 @@ require (
k8s.io/api v0.31.1
k8s.io/apimachinery v0.31.1
k8s.io/client-go v0.31.1
k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
sigs.k8s.io/controller-runtime v0.19.0
)
@ -27,6 +30,7 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@ -70,8 +74,8 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 // indirect
github.com/prometheus/client_golang v1.20.3 // indirect
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 // indirect
github.com/prometheus/client_golang v1.20.4 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.59.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
@ -109,7 +113,7 @@ require (
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
google.golang.org/protobuf v1.34.2 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@ -119,7 +123,6 @@ require (
k8s.io/component-base v0.31.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect
k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect

go.sum
View File

@ -14,16 +14,18 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b h1:LZ9tIgKmWb8ZvyLg/J8ExXtmBtEWP2dr3Y4TU4nCq/w=
github.com/cloudnative-pg/api v0.0.0-20241004125129-98baa9f4957b/go.mod h1:mzd1EvoLYy16jJdne6/4nwhoj7t4IZ0MqJMEH4mla8Q=
github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME=
github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE=
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542 h1:IXf5lj+m4CBqzckQ9L/9hJ01JUoVw5N0FuPex0sVdVo=
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241001084914-829808376542/go.mod h1:L8M+kTGpz/eaLZj46+4sARvO/vDYlo/m1xOigI/ghBA=
github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248 h1:eUGzb7YNjVLilwhgZoe4hDOO70fci3oqb/ZzQFbN3xg=
github.com/cloudnative-pg/cnpg-i v0.0.0-20240924030516-c5636170f248/go.mod h1:K9/4eAT3rh2bKIWyujoN8BIPRXa4d1Ls+eBY8PE8y6w=
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6 h1:C4CU5fBTYTiJBPDqcgHpXSc5IvRTy+5KTaFZzdKHfAQ=
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241002070940-e5495e9c5ed6/go.mod h1:mHEVy/Guae+rij1qlgwHg+lyFKDX48qjTL4lAqE7OJs=
github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0=
github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM=
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241017151552-20297270038b h1:47OKNQRgSs9XWvIt2bm8B1Yo7TA5oxBGjQPqF0j+Llw=
github.com/cloudnative-pg/cloudnative-pg v1.24.1-0.20241017151552-20297270038b/go.mod h1:0grklCuA9WSA5lazBeTgDqLHjxP13fqAhqlveDx7hPg=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241018104538-7f169c24d7a8 h1:8m2Xcobj9tcwjDHgeJ7OJpySx908P1gqLeGMVZ7pMJU=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241018104538-7f169c24d7a8/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241017094517-76870f5660ef h1:Og/btbliFYkJ2sz899T+b832GaFQU4ai52k98zZr0xA=
github.com/cloudnative-pg/cnpg-i-machinery v0.0.0-20241017094517-76870f5660ef/go.mod h1:X6r1fRuUEIAv4+5SSBY2RmQ201K6GcptOXgnmaX/8tY=
github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 h1:3pgtSYhv3RDd+51bnlqICNrcVpWQQvriCOvkxtbZpaE=
github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -138,10 +140,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU=
github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 h1:XGoEXT6WTTihO+MD8MAao+YaQIH905HbK0WK2lyo28k=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
@ -266,8 +268,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@ -1,28 +1,20 @@
// Package main is the entrypoint of operator plugin
package main
// Package instance is the entrypoint of the instance plugin
package instance
import (
"fmt"
"os"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/instance"
)
func main() {
cobra.EnableTraverseRunHooks = true
logFlags := &log.Flags{}
rootCmd := &cobra.Command{
// NewCmd creates a new instance command
func NewCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "instance",
Short: "Starts the Barman Cloud CNPG-i sidecar plugin",
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
return nil
},
Short: "Starts the Barman Cloud CNPG-I sidecar plugin",
RunE: func(cmd *cobra.Command, _ []string) error {
requiredSettings := []string{
"namespace",
@ -42,8 +34,6 @@ func main() {
},
}
logFlags.AddFlags(rootCmd.PersistentFlags())
_ = viper.BindEnv("namespace", "NAMESPACE")
_ = viper.BindEnv("barman-object-name", "BARMAN_OBJECT_NAME")
_ = viper.BindEnv("cluster-name", "CLUSTER_NAME")
@ -51,8 +41,5 @@ func main() {
_ = viper.BindEnv("pgdata", "PGDATA")
_ = viper.BindEnv("spool-directory", "SPOOL_DIRECTORY")
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
return cmd
}

View File

@ -0,0 +1,88 @@
// Package operator is the entrypoint of the operator plugin
package operator
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator"
)
// NewCmd creates a new operator command
func NewCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "operator",
Short: "Starts the BarmanObjectStore reconciler and the Barman Cloud CNPG-i plugin",
RunE: func(cmd *cobra.Command, _ []string) error {
if len(viper.GetString("sidecar-image")) == 0 {
return fmt.Errorf("missing required SIDECAR_IMAGE environment variable")
}
return operator.Start(cmd.Context())
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
return nil
},
}
cmd.Flags().String("metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
_ = viper.BindPFlag("metrics-bind-address", cmd.Flags().Lookup("metrics-bind-address"))
cmd.Flags().String("health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
_ = viper.BindPFlag("health-probe-bind-address", cmd.Flags().Lookup("health-probe-bind-address"))
cmd.Flags().Bool("leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
_ = viper.BindPFlag("leader-elect", cmd.Flags().Lookup("leader-elect"))
cmd.Flags().Bool("metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
_ = viper.BindPFlag("metrics-secure", cmd.Flags().Lookup("metrics-secure"))
cmd.Flags().Bool("enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
_ = viper.BindPFlag("enable-http2", cmd.Flags().Lookup("enable-http2"))
cmd.Flags().String(
"plugin-path",
"",
"The plugins socket path",
)
_ = viper.BindPFlag("plugin-path", cmd.Flags().Lookup("plugin-path"))
cmd.Flags().String(
"server-cert",
"",
"The public key to be used for the server process",
)
_ = viper.BindPFlag("server-cert", cmd.Flags().Lookup("server-cert"))
cmd.Flags().String(
"server-key",
"",
"The key to be used for the server process",
)
_ = viper.BindPFlag("server-key", cmd.Flags().Lookup("server-key"))
cmd.Flags().String(
"client-cert",
"",
"The client public key to verify the connection",
)
_ = viper.BindPFlag("client-cert", cmd.Flags().Lookup("client-cert"))
cmd.Flags().String(
"server-address",
"",
"The address where to listen (i.e. 0:9090)",
)
_ = viper.BindPFlag("server-address", cmd.Flags().Lookup("server-address"))
_ = viper.BindEnv("sidecar-image", "SIDECAR_IMAGE")
return cmd
}
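
Each flag above is mirrored into viper with BindPFlag, and SIDECAR_IMAGE additionally arrives through BindEnv, so the command body reads a single configuration namespace whether a value came from the CLI or from the environment. A minimal standalone sketch of the pattern (the demo command is hypothetical):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			// Resolves from --sidecar-image if set, falling back to
			// the SIDECAR_IMAGE environment variable.
			fmt.Println(viper.GetString("sidecar-image"))
			return nil
		},
	}
	cmd.Flags().String("sidecar-image", "", "The sidecar image to inject")
	_ = viper.BindPFlag("sidecar-image", cmd.Flags().Lookup("sidecar-image"))
	_ = viper.BindEnv("sidecar-image", "SIDECAR_IMAGE")
	_ = cmd.Execute()
}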

View File

@ -0,0 +1,47 @@
// Package restore is the entrypoint of the restore capabilities
package restore
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/restore"
)
// NewCmd creates the "restore" subcommand
func NewCmd() *cobra.Command {
cobra.EnableTraverseRunHooks = true
cmd := &cobra.Command{
Use: "restore",
Short: "Starts the Barman Cloud CNPG-I sidecar plugin",
RunE: func(cmd *cobra.Command, _ []string) error {
requiredSettings := []string{
"namespace",
"cluster-name",
"pod-name",
"spool-directory",
}
for _, k := range requiredSettings {
if len(viper.GetString(k)) == 0 {
return fmt.Errorf("missing required %s setting", k)
}
}
return restore.Start(cmd.Context())
},
}
_ = viper.BindEnv("namespace", "NAMESPACE")
_ = viper.BindEnv("backup-to-restore", "BACKUP_TO_RESTORE")
_ = viper.BindEnv("barman-archive-configuration", "BARMAN_OBJECT_NAME")
_ = viper.BindEnv("cluster-name", "CLUSTER_NAME")
_ = viper.BindEnv("pod-name", "POD_NAME")
_ = viper.BindEnv("pgdata", "PGDATA")
_ = viper.BindEnv("spool-directory", "SPOOL_DIRECTORY")
return cmd
}

View File

@ -0,0 +1,20 @@
package common
import (
"encoding/json"
"fmt"
barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)
// GetCredentialsFromBackup extracts the Barman credentials that were
// stored in the backup's plugin metadata at backup time.
func GetCredentialsFromBackup(backup *cnpgv1.Backup) (barmanapi.BarmanCredentials, error) {
rawCred := backup.Status.PluginMetadata["credentials"]
var creds barmanapi.BarmanCredentials
if err := json.Unmarshal([]byte(rawCred), &creds); err != nil {
return barmanapi.BarmanCredentials{}, fmt.Errorf("while unmarshaling credentials: %w", err)
}
return creds, nil
}
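
This helper is the consumer side of the "credentials" entry that the backup service stores in Backup.Status.PluginMetadata (see the BackupResult changes later in this diff). A round-trip sketch as a test, assuming PluginMetadata is a plain map[string]string as its use here suggests:

package common

import (
	"encoding/json"
	"testing"

	barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func TestCredentialsRoundTrip(t *testing.T) {
	// The backup side marshals the credentials into the status...
	raw, err := json.Marshal(barmanapi.BarmanCredentials{})
	if err != nil {
		t.Fatal(err)
	}
	backup := &cnpgv1.Backup{}
	backup.Status.PluginMetadata = map[string]string{"credentials": string(raw)}

	// ...and the restore side reads them back.
	if _, err := GetCredentialsFromBackup(backup); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}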

View File

@ -2,6 +2,7 @@ package instance
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
@ -66,8 +67,11 @@ func (b BackupServiceImplementation) Backup(
) (*backup.BackupResult, error) {
contextLogger := log.FromContext(ctx)
contextLogger.Info("Starting backup")
var objectStore barmancloudv1.ObjectStore
if err := b.Client.Get(ctx, b.BarmanObjectKey, &objectStore); err != nil {
contextLogger.Error(err, "while getting object store", "key", b.BarmanObjectKey)
return nil, err
}
@ -78,6 +82,7 @@ func (b BackupServiceImplementation) Backup(
capabilities, err := barmanCapabilities.CurrentCapabilities()
if err != nil {
contextLogger.Error(err, "while getting capabilities")
return nil, err
}
backupCmd := barmanBackup.NewBackupCommand(
@ -96,6 +101,7 @@ func (b BackupServiceImplementation) Backup(
&objectStore.Spec.Configuration,
mergeEnv(osEnvironment, caBundleEnvironment))
if err != nil {
contextLogger.Error(err, "while setting backup cloud credentials")
return nil, err
}
@ -104,42 +110,69 @@ func (b BackupServiceImplementation) Backup(
if err = backupCmd.Take(
ctx,
backupName,
b.InstanceName,
objectStore.Name,
env,
barmanCloudExecutor{},
postgres.BackupTemporaryDirectory,
); err != nil {
contextLogger.Error(err, "while taking backup")
return nil, err
}
executedBackupInfo, err := backupCmd.GetExecutedBackupInfo(
ctx,
backupName,
b.InstanceName,
objectStore.Name,
barmanCloudExecutor{},
env)
if err != nil {
contextLogger.Error(err, "while getting executed backup info")
return nil, err
}
cred, err := json.Marshal(objectStore.Spec.Configuration.BarmanCredentials)
if err != nil {
contextLogger.Error(err, "while marshalling credentials")
return nil, err
}
var endpointCA *backup.KeyName
if objectStore.Spec.Configuration.EndpointCA != nil {
endpointCA = &backup.KeyName{
Name: objectStore.Spec.Configuration.EndpointCA.Name,
Key: objectStore.Spec.Configuration.EndpointCA.Key,
}
}
var encryption string
if objectStore.Spec.Configuration.Data != nil {
encryption = string(objectStore.Spec.Configuration.Data.Encryption)
}
contextLogger.Info("Backup completed", "backup", executedBackupInfo.ID)
return &backup.BackupResult{
BackupId: executedBackupInfo.ID,
BackupName: executedBackupInfo.BackupName,
StartedAt: metav1.Time{Time: executedBackupInfo.BeginTime}.Unix(),
StoppedAt: metav1.Time{Time: executedBackupInfo.EndTime}.Unix(),
BeginWal: executedBackupInfo.BeginWal,
EndWal: executedBackupInfo.EndWal,
BeginLsn: executedBackupInfo.BeginLSN,
EndLsn: executedBackupInfo.EndLSN,
BackupLabelFile: nil,
TablespaceMapFile: nil,
InstanceId: b.InstanceName,
Online: true,
BackupId: executedBackupInfo.ID,
BackupName: executedBackupInfo.BackupName,
StartedAt: metav1.Time{Time: executedBackupInfo.BeginTime}.Unix(),
StoppedAt: metav1.Time{Time: executedBackupInfo.EndTime}.Unix(),
BeginWal: executedBackupInfo.BeginWal,
EndWal: executedBackupInfo.EndWal,
BeginLsn: executedBackupInfo.BeginLSN,
EndLsn: executedBackupInfo.EndLSN,
InstanceId: b.InstanceName,
Online: true,
Metadata: map[string]string{
"timeline": strconv.Itoa(executedBackupInfo.TimeLine),
"version": metadata.Data.Version,
"name": metadata.Data.Name,
"displayName": metadata.Data.DisplayName,
// TODO: is it safe?
"credentials": string(cred),
},
ServerName: objectStore.Name,
EndpointUrl: objectStore.Spec.Configuration.EndpointURL,
DestinationPath: objectStore.Spec.Configuration.DestinationPath,
EndpointCa: endpointCA,
Encryption: encryption,
}, nil
}

View File

@ -21,13 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)
const (
// CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
// if present, requires the WAL archiver to check that the backup object
// store is empty.
CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
// WALServiceImplementation is the implementation of the WAL Service
@ -95,7 +89,7 @@ func (w WALServiceImplementation) Archive(
envArchive,
w.SpoolDirectory,
w.PGDataPath,
path.Join(w.PGDataPath, CheckEmptyWalArchiveFile),
path.Join(w.PGDataPath, metadata.CheckEmptyWalArchiveFile),
)
if err != nil {
return nil, err

View File

@ -6,6 +6,13 @@ import "github.com/cloudnative-pg/cnpg-i/pkg/identity"
// Point-of-view
const PluginName = "barman-cloud.cloudnative-pg.io"
const (
// CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
// if present, requires the WAL archiver to check that the backup object
// store is empty.
CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
)
// Data is the metadata of this plugin.
var Data = identity.GetPluginMetadataResponse{
Name: PluginName,

View File

@ -4,7 +4,6 @@ import (
"strings"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/common"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
@ -49,23 +48,49 @@ type PluginConfiguration struct {
}
// NewFromCluster extracts the configuration from the cluster
func NewFromCluster(cluster *cnpgv1.Cluster) (*PluginConfiguration, error) {
helper := common.NewPlugin(
func NewFromCluster(cluster *cnpgv1.Cluster) *PluginConfiguration {
helper := NewPlugin(
*cluster,
metadata.PluginName,
)
result := &PluginConfiguration{
// used for the backup/archive
BarmanObjectName: helper.Parameters["barmanObjectName"],
}
return result
}
// ValidateBarmanObjectName checks if the barmanObjectName is set
func (p *PluginConfiguration) ValidateBarmanObjectName() error {
err := NewConfigurationError()
if len(result.BarmanObjectName) == 0 {
err = err.WithMessage("Missing barmanObjectName parameter")
if len(p.BarmanObjectName) != 0 {
return nil
}
if err.IsEmpty() {
return result, nil
}
return result, err
return err.WithMessage("Missing barmanObjectName parameter")
}
// Plugin represents a plugin with its associated cluster and parameters.
type Plugin struct {
Cluster *cnpgv1.Cluster
// Parameters are the configuration parameters of this plugin
Parameters map[string]string
PluginIndex int
}
// NewPlugin creates a new Plugin instance for the given cluster and plugin name.
func NewPlugin(cluster cnpgv1.Cluster, pluginName string) *Plugin {
result := &Plugin{Cluster: &cluster}
result.PluginIndex = -1
for idx, cfg := range result.Cluster.Spec.Plugins {
if cfg.Name == pluginName {
result.PluginIndex = idx
result.Parameters = cfg.Parameters
}
}
return result
}
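
NewFromCluster no longer returns an error: validation became a separate, explicit step, which lets the recovery path build a configuration even when no barmanObjectName is set. A hypothetical caller (needsSidecar is illustrative only, not part of this commit):

package config

import (
	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func needsSidecar(cluster *cnpgv1.Cluster) bool {
	// Building the configuration always succeeds; only callers that
	// actually need the object store insist on the parameter.
	cfg := NewFromCluster(cluster)
	return cfg.ValidateBarmanObjectName() == nil
}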

View File

@ -3,13 +3,18 @@ package operator
import (
"context"
"errors"
"fmt"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/object"
"github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/viper"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
)
@ -38,6 +43,15 @@ func (impl LifecycleImplementation) GetCapabilities(
},
},
},
{
Group: batchv1.GroupName,
Kind: "Job",
OperationTypes: []*lifecycle.OperatorOperationType{
{
Type: lifecycle.OperatorOperationType_TYPE_CREATE,
},
},
},
},
}, nil
}
@ -47,50 +61,131 @@ func (impl LifecycleImplementation) LifecycleHook(
ctx context.Context,
request *lifecycle.OperatorLifecycleRequest,
) (*lifecycle.OperatorLifecycleResponse, error) {
contextLogger := log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle")
contextLogger := log.FromContext(ctx).WithName("lifecycle")
contextLogger.Info("Lifecycle hook reconciliation start")
operation := request.GetOperationType().GetType().Enum()
if operation == nil {
return nil, errors.New("no operation set")
}
cluster, err := decoder.DecodeClusterJSON(request.GetClusterDefinition())
kind, err := object.GetKind(request.GetObjectDefinition())
if err != nil {
return nil, err
}
var cluster cnpgv1.Cluster
if err := decoder.DecodeObject(
request.GetClusterDefinition(),
&cluster,
cnpgv1.GroupVersion.WithKind("Cluster"),
); err != nil {
return nil, err
}
pluginConfiguration := config.NewFromCluster(&cluster)
switch kind {
case "Pod":
contextLogger.Info("Reconciling pod")
return reconcilePod(ctx, &cluster, request, pluginConfiguration)
case "Job":
contextLogger.Info("Reconciling job")
return reconcileJob(ctx, &cluster, request, pluginConfiguration)
default:
return nil, fmt.Errorf("unsupported kind: %s", kind)
}
}
func reconcileJob(
ctx context.Context,
cluster *cnpgv1.Cluster,
request *lifecycle.OperatorLifecycleRequest,
pluginConfiguration *config.PluginConfiguration,
) (*lifecycle.OperatorLifecycleResponse, error) {
contextLogger := log.FromContext(ctx).WithName("lifecycle")
if !cluster.UsePluginForBootstrapRecoveryBackup() {
contextLogger.Debug("cluster does not use the plugin for recovery, skipping")
return nil, nil
}
backupSource := cluster.Spec.Bootstrap.Recovery.Backup
var job batchv1.Job
if err := decoder.DecodeObject(
request.GetObjectDefinition(),
&job,
batchv1.SchemeGroupVersion.WithKind("Job"),
); err != nil {
contextLogger.Error(err, "failed to decode job")
return nil, err
}
contextLogger = log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle").
WithValues("jobName", job.Name)
contextLogger.Debug("starting job reconciliation")
if job.Spec.Template.Labels[utils.JobRoleLabelName] != "full-recovery" {
contextLogger.Debug("job is not a recovery job, skipping")
return nil, nil
}
mutatedJob := job.DeepCopy()
if err := reconcilePodSpec(
pluginConfiguration,
cluster,
&mutatedJob.Spec.Template.Spec,
"full-recovery",
corev1.Container{
Args: []string{"restore"},
Env: []corev1.EnvVar{
{
Name: "BACKUP_TO_RESTORE",
Value: backupSource.Name,
},
},
},
); err != nil {
return nil, fmt.Errorf("while reconciling pod spec for job: %w", err)
}
patch, err := object.CreatePatch(mutatedJob, &job)
if err != nil {
return nil, err
}
contextLogger.Debug("generated patch", "content", string(patch))
return &lifecycle.OperatorLifecycleResponse{
JsonPatch: patch,
}, nil
}
func reconcilePod(
ctx context.Context,
cluster *cnpgv1.Cluster,
request *lifecycle.OperatorLifecycleRequest,
pluginConfiguration *config.PluginConfiguration,
) (*lifecycle.OperatorLifecycleResponse, error) {
contextLogger := log.FromContext(ctx).WithName("lifecycle")
if err := pluginConfiguration.ValidateBarmanObjectName(); err != nil {
contextLogger.Info("no barman object name set, skipping pod sidecar injection")
return nil, nil
}
pod, err := decoder.DecodePodJSON(request.GetObjectDefinition())
if err != nil {
return nil, err
}
pluginConfiguration, err := config.NewFromCluster(cluster)
if err != nil {
return nil, err
}
contextLogger = log.FromContext(ctx).WithName("plugin-barman-cloud-lifecycle").
WithValues("podName", pod.Name)
mutatedPod := pod.DeepCopy()
err = object.InjectPluginSidecar(mutatedPod, &corev1.Container{
Name: "plugin-barman-cloud",
Image: viper.GetString("sidecar-image"),
Env: []corev1.EnvVar{
{
Name: "BARMAN_OBJECT_NAME",
Value: pluginConfiguration.BarmanObjectName,
},
{
// TODO: should we really use this one?
// should we mount an emptyDir volume just for that?
Name: "SPOOL_DIRECTORY",
Value: "/controller/wal-restore-spool",
},
},
Command: []string{
"/usr/local/bin/instance",
},
}, true)
if err != nil {
return nil, err
if err := reconcilePodSpec(pluginConfiguration, cluster, &mutatedPod.Spec, "postgres", corev1.Container{
Args: []string{"instance"},
}); err != nil {
return nil, fmt.Errorf("while reconciling pod spec for pod: %w", err)
}
patch, err := object.CreatePatch(mutatedPod, pod)
@ -103,3 +198,164 @@ func (impl LifecycleImplementation) LifecycleHook(
JsonPatch: patch,
}, nil
}
func reconcilePodSpec(
cfg *config.PluginConfiguration,
cluster *cnpgv1.Cluster,
spec *corev1.PodSpec,
mainContainerName string,
sidecarConfig corev1.Container,
) error {
envs := []corev1.EnvVar{
{
Name: "NAMESPACE",
Value: cluster.Namespace,
},
{
Name: "CLUSTER_NAME",
Value: cluster.Name,
},
{
Name: "BARMAN_OBJECT_NAME",
Value: cfg.BarmanObjectName,
},
{
// TODO: should we really use this one?
// should we mount an emptyDir volume just for that?
Name: "SPOOL_DIRECTORY",
Value: "/controller/wal-restore-spool",
},
}
// fixed values
sidecarConfig.Name = "plugin-barman-cloud"
sidecarConfig.Image = viper.GetString("sidecar-image")
sidecarConfig.ImagePullPolicy = cluster.Spec.ImagePullPolicy
sidecarConfig.Command = []string{
"/usr/local/bin/instance",
}
// merge the main container envs if they aren't already set
for _, container := range spec.Containers {
if container.Name == mainContainerName {
for _, env := range container.Env {
found := false
for _, existingEnv := range sidecarConfig.Env {
if existingEnv.Name == env.Name {
found = true
break
}
}
if !found {
sidecarConfig.Env = append(sidecarConfig.Env, env)
}
}
break
}
}
// merge the default envs if they aren't already set
for _, env := range envs {
found := false
for _, existingEnv := range sidecarConfig.Env {
if existingEnv.Name == env.Name {
found = true
break
}
}
if !found {
sidecarConfig.Env = append(sidecarConfig.Env, env)
}
}
if err := InjectPluginSidecarPodSpec(spec, &sidecarConfig, mainContainerName, true); err != nil {
return err
}
return nil
}
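
The duplicate check above runs twice, first over the main container's variables and then over the plugin defaults, so anything already set on the sidecar wins. A compact equivalent of that rule, as a sketch (mergeEnvIfMissing is hypothetical, not part of this commit):

package operator

import corev1 "k8s.io/api/core/v1"

// mergeEnvIfMissing appends entries from src to dst only when dst has
// no variable with the same name, so earlier sources take precedence.
func mergeEnvIfMissing(dst, src []corev1.EnvVar) []corev1.EnvVar {
	seen := make(map[string]struct{}, len(dst))
	for _, e := range dst {
		seen[e.Name] = struct{}{}
	}
	for _, e := range src {
		if _, ok := seen[e.Name]; !ok {
			dst = append(dst, e)
			seen[e.Name] = struct{}{}
		}
	}
	return dst
}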
// TODO: move to machinery once the logic is finalized
// InjectPluginVolumePodSpec injects the plugin volume into a CNPG Pod spec.
func InjectPluginVolumePodSpec(spec *corev1.PodSpec, mainContainerName string) {
const (
pluginVolumeName = "plugins"
pluginMountPath = "/plugins"
)
foundPluginVolume := false
for i := range spec.Volumes {
if spec.Volumes[i].Name == pluginVolumeName {
foundPluginVolume = true
}
}
if foundPluginVolume {
return
}
spec.Volumes = append(spec.Volumes, corev1.Volume{
Name: pluginVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
})
for i := range spec.Containers {
if spec.Containers[i].Name == mainContainerName {
spec.Containers[i].VolumeMounts = append(
spec.Containers[i].VolumeMounts,
corev1.VolumeMount{
Name: pluginVolumeName,
MountPath: pluginMountPath,
},
)
}
}
}
// InjectPluginSidecarPodSpec injects a plugin sidecar into a CNPG Pod spec.
//
// If the "injectMainContainerVolumes" flag is true, this will append all the volume
// mounts that are used in the instance manager Pod to the passed sidecar
// container, granting it superuser access to the PostgreSQL instance.
func InjectPluginSidecarPodSpec(
spec *corev1.PodSpec,
sidecar *corev1.Container,
mainContainerName string,
injectMainContainerVolumes bool,
) error {
sidecar = sidecar.DeepCopy()
InjectPluginVolumePodSpec(spec, mainContainerName)
var volumeMounts []corev1.VolumeMount
sidecarContainerFound := false
mainContainerFound := false
for i := range spec.Containers {
if spec.Containers[i].Name == mainContainerName {
volumeMounts = spec.Containers[i].VolumeMounts
mainContainerFound = true
} else if spec.Containers[i].Name == sidecar.Name {
sidecarContainerFound = true
}
}
if sidecarContainerFound {
// The sidecar container was already added
return nil
}
if !mainContainerFound {
return errors.New("main container not found")
}
// Do not modify the passed sidecar definition
if injectMainContainerVolumes {
sidecar.VolumeMounts = append(sidecar.VolumeMounts, volumeMounts...)
}
sidecar.RestartPolicy = ptr.To(corev1.ContainerRestartPolicyAlways)
spec.InitContainers = append(spec.InitContainers, *sidecar)
return nil
}
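
A sketch of what a caller gets back (the demo function and pod spec are hypothetical): the sidecar lands in InitContainers with RestartPolicy Always, which makes it a native sidecar, and it carries the shared "plugins" emptyDir mount plus every volume mount of the main container.

package operator

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func demoInjection() error {
	spec := &corev1.PodSpec{
		Containers: []corev1.Container{{
			Name: "postgres",
			VolumeMounts: []corev1.VolumeMount{
				{Name: "pgdata", MountPath: "/var/lib/postgresql/data"},
			},
		}},
	}
	sidecar := corev1.Container{Name: "plugin-barman-cloud", Args: []string{"instance"}}
	if err := InjectPluginSidecarPodSpec(spec, &sidecar, "postgres", true); err != nil {
		return err
	}
	// One init container, mounting both "pgdata" and "plugins".
	fmt.Println(len(spec.InitContainers), spec.InitContainers[0].VolumeMounts)
	return nil
}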

View File

@ -0,0 +1,197 @@
package operator
import (
"encoding/json"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("LifecycleImplementation", func() {
var (
lifecycleImpl LifecycleImplementation
pluginConfiguration *config.PluginConfiguration
cluster *cnpgv1.Cluster
jobTypeMeta = metav1.TypeMeta{
Kind: "Job",
APIVersion: "batch/v1",
}
podTypeMeta = metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
}
)
BeforeEach(func() {
pluginConfiguration = &config.PluginConfiguration{
BarmanObjectName: "barman-object",
}
cluster = &cnpgv1.Cluster{
Spec: cnpgv1.ClusterSpec{
Bootstrap: &cnpgv1.BootstrapConfiguration{
Recovery: &cnpgv1.BootstrapRecovery{
Backup: &cnpgv1.BackupSource{
LocalObjectReference: cnpgv1.LocalObjectReference{
Name: "backup-object",
},
UsePlugin: ptr.To(true),
},
},
},
},
}
})
Describe("GetCapabilities", func() {
It("returns the correct capabilities", func(ctx SpecContext) {
response, err := lifecycleImpl.GetCapabilities(ctx, &lifecycle.OperatorLifecycleCapabilitiesRequest{})
Expect(err).NotTo(HaveOccurred())
Expect(response).NotTo(BeNil())
Expect(response.LifecycleCapabilities).To(HaveLen(2))
})
})
Describe("LifecycleHook", func() {
It("returns an error if object definition is invalid", func(ctx SpecContext) {
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: []byte("invalid-json"),
}
response, err := lifecycleImpl.LifecycleHook(ctx, request)
Expect(err).To(HaveOccurred())
Expect(response).To(BeNil())
})
})
Describe("reconcileJob", func() {
It("returns a patch for a valid recovery job", func(ctx SpecContext) {
job := &batchv1.Job{
TypeMeta: jobTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Labels: map[string]string{},
},
Spec: batchv1.JobSpec{Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
utils.JobRoleLabelName: "full-recovery",
},
},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "full-recovery"}}},
}},
}
jobJSON, _ := json.Marshal(job)
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: jobJSON,
}
response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
Expect(err).NotTo(HaveOccurred())
Expect(response).NotTo(BeNil())
Expect(response.JsonPatch).NotTo(BeEmpty())
})
It("skips non-recovery jobs", func(ctx SpecContext) {
job := &batchv1.Job{
TypeMeta: jobTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Labels: map[string]string{
"job-role": "non-recovery",
},
},
}
jobJSON, _ := json.Marshal(job)
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: jobJSON,
}
response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
Expect(err).NotTo(HaveOccurred())
Expect(response).To(BeNil())
})
It("returns an error for invalid job definition", func(ctx SpecContext) {
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: []byte("invalid-json"),
}
response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
Expect(err).To(HaveOccurred())
Expect(response).To(BeNil())
})
It("should not error out if backup object name is not set and the job isn't full recovery",
func(ctx SpecContext) {
job := &batchv1.Job{
TypeMeta: jobTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Labels: map[string]string{},
},
Spec: batchv1.JobSpec{Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
utils.JobRoleLabelName: "non-recovery",
},
},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "non-recovery"}}},
}},
}
jobJSON, _ := json.Marshal(job)
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: jobJSON,
}
response, err := reconcileJob(ctx, cluster, request, pluginConfiguration)
Expect(err).NotTo(HaveOccurred())
Expect(response).To(BeNil())
})
})
Describe("reconcilePod", func() {
It("returns a patch for a valid pod", func(ctx SpecContext) {
pod := &corev1.Pod{
TypeMeta: podTypeMeta,
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "postgres"}}},
}
podJSON, _ := json.Marshal(pod)
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: podJSON,
}
response, err := reconcilePod(ctx, cluster, request, pluginConfiguration)
Expect(err).NotTo(HaveOccurred())
Expect(response).NotTo(BeNil())
Expect(response.JsonPatch).NotTo(BeEmpty())
var patch []map[string]interface{}
err = json.Unmarshal(response.JsonPatch, &patch)
Expect(err).NotTo(HaveOccurred())
Expect(patch).To(ContainElement(HaveKeyWithValue("op", "add")))
Expect(patch).To(ContainElement(HaveKeyWithValue("path", "/spec/containers/1")))
Expect(patch).To(ContainElement(HaveKeyWithValue("value", HaveKeyWithValue("name", "plugin-barman-cloud"))))
})
It("returns an error for invalid pod definition", func(ctx SpecContext) {
request := &lifecycle.OperatorLifecycleRequest{
ObjectDefinition: []byte("invalid-json"),
}
response, err := reconcilePod(ctx, cluster, request, pluginConfiguration)
Expect(err).To(HaveOccurred())
Expect(response).To(BeNil())
})
})
})

View File

@ -7,12 +7,13 @@ import (
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/object"
"github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
@ -48,7 +49,7 @@ func (r ReconcilerImplementation) Pre(
request *reconciler.ReconcilerHooksRequest,
) (*reconciler.ReconcilerHooksResult, error) {
contextLogger := log.FromContext(ctx)
contextLogger.Info("Pre hook reconciliation start")
reconciledKind, err := object.GetKind(request.GetResourceDefinition())
if err != nil {
return nil, err
@ -59,40 +60,68 @@ func (r ReconcilerImplementation) Pre(
}, nil
}
cluster, err := decoder.DecodeClusterJSON(request.GetResourceDefinition())
if err != nil {
contextLogger.Debug("parsing cluster definition")
var cluster cnpgv1.Cluster
if err := decoder.DecodeObject(
request.GetResourceDefinition(),
&cluster,
cnpgv1.GroupVersion.WithKind("Cluster")); err != nil {
return nil, err
}
contextLogger = contextLogger.WithValues("name", cluster.Name, "namespace", cluster.Namespace)
ctx = log.IntoContext(ctx, contextLogger)
pluginConfiguration, err := config.NewFromCluster(cluster)
if err != nil {
return nil, err
}
pluginConfiguration := config.NewFromCluster(&cluster)
var barmanObject barmancloudv1.ObjectStore
if err := r.Client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: pluginConfiguration.BarmanObjectName,
}, &barmanObject); err != nil {
if apierrs.IsNotFound(err) {
contextLogger.Info("Not found barman object configuration, requeuing")
return &reconciler.ReconcilerHooksResult{
Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE,
}, nil
contextLogger.Debug("parsing barman object configuration")
var barmanObject *barmancloudv1.ObjectStore
// this could be empty during recoveries
if pluginConfiguration.BarmanObjectName != "" {
barmanObject = &barmancloudv1.ObjectStore{}
if err := r.Client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: pluginConfiguration.BarmanObjectName,
}, barmanObject); err != nil {
if apierrs.IsNotFound(err) {
contextLogger.Info("barman object configuration not found, requeuing")
return &reconciler.ReconcilerHooksResult{
Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE,
}, nil
}
return nil, err
}
}
if err := r.ensureRole(ctx, cluster, &barmanObject); err != nil {
var additionalSecretNames []string
if cluster.UsePluginForBootstrapRecoveryBackup() {
var backup cnpgv1.Backup
if err := r.Client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: cluster.Spec.Bootstrap.Recovery.Backup.Name,
}, &backup); err != nil {
return nil, err
}
credentials, err := common.GetCredentialsFromBackup(&backup)
if err != nil {
return nil, err
}
additionalSecretNames = append(additionalSecretNames, specs.CollectSecretNamesFromCredentials(&credentials)...)
if backup.Status.EndpointCA != nil {
additionalSecretNames = append(additionalSecretNames, backup.Status.EndpointCA.Name)
}
}
if err := r.ensureRole(ctx, &cluster, barmanObject, additionalSecretNames); err != nil {
return nil, err
}
if err := r.ensureRoleBinding(ctx, cluster); err != nil {
if err := r.ensureRoleBinding(ctx, &cluster); err != nil {
return nil, err
}
contextLogger.Info("Pre hook reconciliation completed")
return &reconciler.ReconcilerHooksResult{
Behavior: reconciler.ReconcilerHooksResult_BEHAVIOR_CONTINUE,
}, nil
@ -112,9 +141,10 @@ func (r ReconcilerImplementation) ensureRole(
ctx context.Context,
cluster *cnpgv1.Cluster,
barmanObject *barmancloudv1.ObjectStore,
additionalSecretNames []string,
) error {
contextLogger := log.FromContext(ctx)
newRole := specs.BuildRole(cluster, barmanObject)
newRole := specs.BuildRole(cluster, barmanObject, additionalSecretNames)
var role rbacv1.Role
if err := r.Client.Get(ctx, client.ObjectKey{

View File

@ -2,6 +2,7 @@ package specs
import (
"fmt"
"github.com/cloudnative-pg/machinery/pkg/stringset"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
rbacv1 "k8s.io/api/rbac/v1"
@ -14,46 +15,62 @@ import (
func BuildRole(
cluster *cnpgv1.Cluster,
barmanObject *barmancloudv1.ObjectStore,
additionalSecretNames []string,
) *rbacv1.Role {
return &rbacv1.Role{
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: cluster.Namespace,
Name: GetRBACName(cluster.Name),
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"barmancloud.cnpg.io",
},
Verbs: []string{
"get",
"watch",
"list",
},
Resources: []string{
"objectstores",
},
ResourceNames: []string{
barmanObject.Name,
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"get",
"watch",
"list",
},
ResourceNames: collectSecretNames(barmanObject),
},
},
Rules: []rbacv1.PolicyRule{},
}
secretsSet := stringset.New()
// TODO: should we handle removals too?
if barmanObject != nil {
role.Rules = append(role.Rules, rbacv1.PolicyRule{
APIGroups: []string{
"barmancloud.cnpg.io",
},
Verbs: []string{
"get",
"watch",
"list",
},
Resources: []string{
"objectstores",
},
ResourceNames: []string{
barmanObject.Name,
},
})
for _, secret := range CollectSecretNamesFromCredentials(&barmanObject.Spec.Configuration.BarmanCredentials) {
secretsSet.Put(secret)
}
}
for _, secret := range additionalSecretNames {
secretsSet.Put(secret)
}
role.Rules = append(role.Rules, rbacv1.PolicyRule{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"get",
"watch",
"list",
},
ResourceNames: secretsSet.ToSortedList(),
})
return role
}
// BuildRoleBinding builds the role binding object for this cluster

View File

@ -1,39 +1,35 @@
package specs
import (
barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
machineryapi "github.com/cloudnative-pg/machinery/pkg/api"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)
func collectSecretNames(object *barmancloudv1.ObjectStore) []string {
if object == nil {
return nil
}
// CollectSecretNamesFromCredentials collects the names of the secrets
func CollectSecretNamesFromCredentials(barmanCredentials *barmanapi.BarmanCredentials) []string {
var references []*machineryapi.SecretKeySelector
if object.Spec.Configuration.AWS != nil {
if barmanCredentials.AWS != nil {
references = append(
references,
object.Spec.Configuration.AWS.AccessKeyIDReference,
object.Spec.Configuration.AWS.SecretAccessKeyReference,
object.Spec.Configuration.AWS.RegionReference,
object.Spec.Configuration.AWS.SessionToken,
barmanCredentials.AWS.AccessKeyIDReference,
barmanCredentials.AWS.SecretAccessKeyReference,
barmanCredentials.AWS.RegionReference,
barmanCredentials.AWS.SessionToken,
)
}
if object.Spec.Configuration.Azure != nil {
if barmanCredentials.Azure != nil {
references = append(
references,
object.Spec.Configuration.Azure.ConnectionString,
object.Spec.Configuration.Azure.StorageAccount,
object.Spec.Configuration.Azure.StorageKey,
object.Spec.Configuration.Azure.StorageSasToken,
barmanCredentials.Azure.ConnectionString,
barmanCredentials.Azure.StorageAccount,
barmanCredentials.Azure.StorageKey,
barmanCredentials.Azure.StorageSasToken,
)
}
if object.Spec.Configuration.Google != nil {
if barmanCredentials.Google != nil {
references = append(
references,
object.Spec.Configuration.Google.ApplicationCredentials,
barmanCredentials.Google.ApplicationCredentials,
)
}
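
The tail of this function is elided by the diff, but its use in BuildRole shows the intent: turn every configured credential reference into the name of a Secret the Role must grant access to. A usage sketch, assuming the barman-cloud and machinery API field names used above (demoCollect is hypothetical):

package specs

import (
	"fmt"

	barmanapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
	machineryapi "github.com/cloudnative-pg/machinery/pkg/api"
)

func demoCollect() {
	// Both AWS references point at the same Secret, so the resulting
	// Role only needs to name "minio-creds".
	creds := &barmanapi.BarmanCredentials{
		AWS: &barmanapi.S3Credentials{
			AccessKeyIDReference: &machineryapi.SecretKeySelector{
				LocalObjectReference: machineryapi.LocalObjectReference{Name: "minio-creds"},
				Key:                  "ACCESS_KEY_ID",
			},
			SecretAccessKeyReference: &machineryapi.SecretKeySelector{
				LocalObjectReference: machineryapi.LocalObjectReference{Name: "minio-creds"},
				Key:                  "ACCESS_SECRET_KEY",
			},
		},
	}
	fmt.Println(CollectSecretNamesFromCredentials(creds))
}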

View File

@ -0,0 +1,13 @@
package operator
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestOperator(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Operator Suite")
}

View File

@ -0,0 +1,2 @@
// Package restore provides the restore functionality for CNPG-I.
package restore

View File

@ -0,0 +1,53 @@
package restore
import (
"context"
"github.com/cloudnative-pg/cnpg-i/pkg/identity"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
// IdentityImplementation implements IdentityServer
type IdentityImplementation struct {
identity.UnimplementedIdentityServer
BarmanObjectKey client.ObjectKey
Client client.Client
}
// GetPluginMetadata implements IdentityServer
func (i IdentityImplementation) GetPluginMetadata(
_ context.Context,
_ *identity.GetPluginMetadataRequest,
) (*identity.GetPluginMetadataResponse, error) {
return &metadata.Data, nil
}
// GetPluginCapabilities implements IdentityServer
func (i IdentityImplementation) GetPluginCapabilities(
_ context.Context,
_ *identity.GetPluginCapabilitiesRequest,
) (*identity.GetPluginCapabilitiesResponse, error) {
return &identity.GetPluginCapabilitiesResponse{
Capabilities: []*identity.PluginCapability{
{
Type: &identity.PluginCapability_Service_{
Service: &identity.PluginCapability_Service{
Type: identity.PluginCapability_Service_TYPE_RESTORE_JOB,
},
},
},
},
}, nil
}
// Probe implements IdentityServer
func (i IdentityImplementation) Probe(
_ context.Context,
_ *identity.ProbeRequest,
) (*identity.ProbeResponse, error) {
return &identity.ProbeResponse{
Ready: true,
}, nil
}

View File

@ -0,0 +1,108 @@
package restore
import (
"context"
"os"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)
var scheme = runtime.NewScheme()
func init() {
utilruntime.Must(barmancloudv1.AddToScheme(scheme))
utilruntime.Must(cnpgv1.AddToScheme(scheme))
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
}
// Start starts the sidecar informers and CNPG-i server
func Start(ctx context.Context) error {
setupLog := log.FromContext(ctx)
setupLog.Info("Starting barman cloud instance plugin")
namespace := viper.GetString("namespace")
archiveConfiguration := viper.GetString("barman-archive-configuration")
clusterName := viper.GetString("cluster-name")
backupToRestoreName := viper.GetString("backup-to-restore")
objs := map[client.Object]cache.ByObject{
&cnpgv1.Cluster{}: {
Field: fields.OneTermEqualSelector("metadata.name", clusterName),
Namespaces: map[string]cache.Config{
namespace: {},
},
},
&cnpgv1.Backup{}: {
Field: fields.OneTermEqualSelector("metadata.name", backupToRestoreName),
Namespaces: map[string]cache.Config{
namespace: {},
},
},
}
if archiveConfiguration != "" {
objs[&barmancloudv1.ObjectStore{}] = cache.ByObject{
Field: fields.OneTermEqualSelector("metadata.name", archiveConfiguration),
Namespaces: map[string]cache.Config{
namespace: {},
},
}
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Cache: cache.Options{
ByObject: objs,
},
Client: client.Options{
Cache: &client.CacheOptions{
DisableFor: []client.Object{
&corev1.Secret{},
},
},
},
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err := mgr.Add(&CNPGI{
PluginPath: viper.GetString("plugin-path"),
SpoolDirectory: viper.GetString("spool-directory"),
ArchiveConfiguration: client.ObjectKey{
Namespace: namespace,
Name: archiveConfiguration,
},
ClusterObjectKey: client.ObjectKey{
Namespace: namespace,
Name: clusterName,
},
BackupToRestoreObjectKey: client.ObjectKey{
Namespace: namespace,
Name: backupToRestoreName,
},
Client: mgr.GetClient(),
PGDataPath: viper.GetString("pgdata"),
}); err != nil {
setupLog.Error(err, "unable to create CNPGI runnable")
return err
}
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
return err
}
return nil
}

View File

@ -0,0 +1,355 @@
package restore
import (
"context"
"errors"
"fmt"
"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/decoder"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
"os"
"os/exec"
"path"
"strings"
"github.com/cloudnative-pg/barman-cloud/pkg/api"
barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities"
barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"github.com/cloudnative-pg/machinery/pkg/execlog"
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/client"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
const (
// ScratchDataDirectory is the directory to be used for scratch data
ScratchDataDirectory = "/controller"
// RecoveryTemporaryDirectory provides a path to store temporary files
// needed in the recovery process
RecoveryTemporaryDirectory = ScratchDataDirectory + "/recovery"
)
// JobHookImpl is the implementation of the restore job hooks
type JobHookImpl struct {
restore.UnimplementedRestoreJobHooksServer
Client client.Client
ClusterObjectKey client.ObjectKey
BackupToRestore client.ObjectKey
ArchiveConfiguration client.ObjectKey
SpoolDirectory string
PgDataPath string
PgWalFolderToSymlink string
}
// GetCapabilities returns the capabilities of the restore job hooks
func (impl JobHookImpl) GetCapabilities(
_ context.Context,
_ *restore.RestoreJobHooksCapabilitiesRequest,
) (*restore.RestoreJobHooksCapabilitiesResult, error) {
return &restore.RestoreJobHooksCapabilitiesResult{
Capabilities: []*restore.RestoreJobHooksCapability{
{
Kind: restore.RestoreJobHooksCapability_KIND_RESTORE,
},
},
}, nil
}
// Restore restores the cluster from a backup
func (impl JobHookImpl) Restore(
ctx context.Context,
req *restore.RestoreRequest,
) (*restore.RestoreResponse, error) {
contextLogger := log.FromContext(ctx)
var cluster cnpgv1.Cluster
if err := decoder.DecodeObject(req.GetClusterDefinition(), &cluster, cnpgv1.GroupVersion.WithKind("Cluster")); err != nil {
return nil, err
}
// Before starting the restore we check if the archive destination is safe to use
// otherwise, we stop creating the cluster
if err := impl.checkBackupDestination(ctx, &cluster); err != nil {
return nil, err
}
var backup cnpgv1.Backup
if err := decoder.DecodeObject(req.GetBackupDefinition(), &backup, cnpgv1.GroupVersion.WithKind("Backup")); err != nil {
return nil, err
}
env, err := impl.getBarmanEnvFromBackup(ctx, &backup)
if err != nil {
return nil, err
}
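// Fail fast if the first WAL required by the backup is missing from the archive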
if err := impl.ensureArchiveContainsLastCheckpointRedoWAL(ctx, &cluster, env, &backup); err != nil {
return nil, err
}
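// Materialize PGDATA from the base backup via barman-cloud-restore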
if err := impl.restoreDataDir(ctx, &backup, env); err != nil {
return nil, err
}
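// When a dedicated WAL volume is configured, relocate pg_wal and symlink it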
if cluster.Spec.WalStorage != nil {
if _, err := impl.restoreCustomWalDir(ctx); err != nil {
return nil, err
}
}
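// Build the restore_command configuration PostgreSQL needs to complete WAL recovery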
config, err := getRestoreWalConfig(ctx, &backup)
if err != nil {
return nil, err
}
contextLogger.Info("sending restore response", "config", config, "env", env)
return &restore.RestoreResponse{
RestoreConfig: config,
Envs: env,
}, nil
}

// restoreDataDir restores PGDATA from an existing backup
func (impl JobHookImpl) restoreDataDir(ctx context.Context, backup *cnpgv1.Backup, env []string) error {
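// Assemble the barman-cloud-restore arguments from the backup status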
var options []string
if backup.Status.EndpointURL != "" {
options = append(options, "--endpoint-url", backup.Status.EndpointURL)
}
options = append(options, backup.Status.DestinationPath)
options = append(options, backup.Status.ServerName)
options = append(options, backup.Status.BackupID)
creds, err := common.GetCredentialsFromBackup(backup)
if err != nil {
return err
}
options, err = barmanCommand.AppendCloudProviderOptionsFromBackup(ctx, options, creds)
if err != nil {
return err
}
options = append(options, impl.PgDataPath)
log.Info("Starting barman-cloud-restore",
"options", options)
cmd := exec.Command(barmanCapabilities.BarmanCloudRestore, options...) // #nosec G204
cmd.Env = env
err = execlog.RunStreaming(cmd, barmanCapabilities.BarmanCloudRestore)
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(ctx, exitError.ExitCode())
}
log.Error(err, "Can't restore backup")
return err
}
log.Info("Restore completed")
return nil
}

func (impl JobHookImpl) ensureArchiveContainsLastCheckpointRedoWAL(
ctx context.Context,
cluster *cnpgv1.Cluster,
env []string,
backup *cnpgv1.Backup,
) error {
// testWALPath is the full path of the scratch file that will temporarily
// contain the last checkpoint's redo WAL
const testWALPath = RecoveryTemporaryDirectory + "/test.wal"
contextLogger := log.FromContext(ctx)
defer func() {
if err := fileutils.RemoveFile(testWALPath); err != nil {
contextLogger.Error(err, "while deleting the temporary WAL file")
}
}()
if err := fileutils.EnsureParentDirectoryExists(testWALPath); err != nil {
return err
}
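// Set up a WAL restorer and try to fetch the backup's begin WAL:
// a failure here means the archive is unusable for this recovery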
rest, err := barmanRestorer.New(ctx, env, impl.SpoolDirectory)
if err != nil {
return err
}
creds, err := common.GetCredentialsFromBackup(backup)
if err != nil {
return err
}
opts, err := barmanCommand.CloudWalRestoreOptions(ctx, &api.BarmanObjectStoreConfiguration{
BarmanCredentials: creds,
EndpointCA: backup.Status.EndpointCA,
EndpointURL: backup.Status.EndpointURL,
DestinationPath: backup.Status.DestinationPath,
ServerName: backup.Status.ServerName,
}, cluster.Name)
if err != nil {
return err
}
if err := rest.Restore(backup.Status.BeginWal, testWALPath, opts); err != nil {
return fmt.Errorf("encountered an error while checking the presence of first needed WAL in the archive: %w", err)
}
return nil
}

func (impl *JobHookImpl) checkBackupDestination(
ctx context.Context,
cluster *cnpgv1.Cluster,
) error {
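// Nothing to verify when no archive object store is configured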
if impl.ArchiveConfiguration.Name == "" {
return nil
}
var barmanObj barmancloudv1.ObjectStore
if err := impl.Client.Get(ctx, impl.ArchiveConfiguration, &barmanObj); err != nil {
return err
}
// Get environment from cache
env, err := barmanCredentials.EnvSetRestoreCloudCredentials(ctx,
impl.Client,
barmanObj.Namespace,
&barmanObj.Spec.Configuration,
os.Environ())
if err != nil {
return fmt.Errorf("can't get credentials for cluster %v: %w", cluster.Name, err)
}
if len(env) == 0 {
return nil
}
// Instantiate the WALArchiver to get the proper configuration
var walArchiver *barmanArchiver.WALArchiver
walArchiver, err = barmanArchiver.New(
ctx,
env,
impl.SpoolDirectory,
impl.PgDataPath,
path.Join(impl.PgDataPath, metadata.CheckEmptyWalArchiveFile))
if err != nil {
return fmt.Errorf("while creating the archiver: %w", err)
}
// Get WAL archive options
checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(
ctx, &barmanObj.Spec.Configuration, barmanObj.Name)
if err != nil {
log.Error(err, "while getting barman-cloud-wal-archive options")
return err
}
// Check if we're ok to archive in the desired destination
if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) {
return walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions)
}
return nil
}

func (impl JobHookImpl) getBarmanEnvFromBackup(
ctx context.Context,
backup *cnpgv1.Backup,
) ([]string, error) {
creds, err := common.GetCredentialsFromBackup(backup)
if err != nil {
return nil, err
}
env, err := barmanCredentials.EnvSetRestoreCloudCredentials(
ctx,
impl.Client,
impl.BackupToRestore.Namespace,
&api.BarmanObjectStoreConfiguration{
BarmanCredentials: creds,
EndpointURL: backup.Status.EndpointURL,
EndpointCA: backup.Status.EndpointCA,
DestinationPath: backup.Status.DestinationPath,
ServerName: backup.Status.ServerName,
},
os.Environ())
if err != nil {
return nil, err
}
log.Info("Recovering existing backup", "backup", backup)
return env, nil
}

// restoreCustomWalDir moves the current pg_wal data to the specified custom WAL directory and applies the symlink.
// It returns whether any changes were made, along with any error encountered in the process.
func (impl JobHookImpl) restoreCustomWalDir(ctx context.Context) (bool, error) {
const pgWalDirectory = "pg_wal"
contextLogger := log.FromContext(ctx)
pgDataWal := path.Join(impl.PgDataPath, pgWalDirectory)
// if the link is already present we have nothing to do.
if linkInfo, _ := os.Readlink(pgDataWal); linkInfo == impl.PgWalFolderToSymlink {
contextLogger.Info("symlink to the WAL volume already present, skipping the custom wal dir restore")
return false, nil
}
if err := fileutils.EnsureDirectoryExists(impl.PgWalFolderToSymlink); err != nil {
return false, err
}
contextLogger.Info("restoring WAL volume symlink and transferring data")
if err := fileutils.EnsureDirectoryExists(pgDataWal); err != nil {
return false, err
}
if err := fileutils.MoveDirectoryContent(pgDataWal, impl.PgWalFolderToSymlink); err != nil {
return false, err
}
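// Remove the now-empty pg_wal directory so it can be replaced by the symlink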
if err := fileutils.RemoveFile(pgDataWal); err != nil {
return false, err
}
return true, os.Symlink(impl.PgWalFolderToSymlink, pgDataWal)
}

// getRestoreWalConfig obtains the content to append to `custom.conf` allowing PostgreSQL
// to complete the WAL recovery from the object storage and then start
// as a new primary
func getRestoreWalConfig(ctx context.Context, backup *cnpgv1.Backup) (string, error) {
cmd := []string{barmanCapabilities.BarmanCloudWalRestore}
if backup.Status.EndpointURL != "" {
cmd = append(cmd, "--endpoint-url", backup.Status.EndpointURL)
}
cmd = append(cmd, backup.Status.DestinationPath)
cmd = append(cmd, backup.Status.ServerName)
creds, err := common.GetCredentialsFromBackup(backup)
if err != nil {
return "", err
}
cmd, err = barmanCommand.AppendCloudProviderOptionsFromBackup(ctx, cmd, creds)
if err != nil {
return "", err
}
cmd = append(cmd, "%f", "%p")
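// For illustration only (bucket and server name below are hypothetical,
// not taken from this commit), the assembled restore_command looks roughly like:
//   barman-cloud-wal-restore s3://my-bucket/backups cluster-example %f %p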
recoveryFileContents := fmt.Sprintf(
"recovery_target_action = promote\n"+
"restore_command = '%s'\n",
strings.Join(cmd, " "))
return recoveryFileContents, nil
}

View File

@ -0,0 +1,48 @@
package restore

import (
"context"

"github.com/cloudnative-pg/cnpg-i-machinery/pkg/pluginhelper/http"
restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"google.golang.org/grpc"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// CNPGI is the implementation of the restore job sidecar
type CNPGI struct {
PluginPath string
SpoolDirectory string
ArchiveConfiguration client.ObjectKey
ClusterObjectKey client.ObjectKey
BackupToRestoreObjectKey client.ObjectKey
Client client.Client
PGDataPath string
}

// Start starts the gRPC service
func (c *CNPGI) Start(ctx context.Context) error {
// PgWalVolumePgWalPath is the path of pg_wal directory inside the WAL volume when present
const PgWalVolumePgWalPath = "/var/lib/postgresql/wal/pg_wal"
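// enrich registers the restore job hooks implementation on the plugin's gRPC server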
enrich := func(server *grpc.Server) error {
restore.RegisterRestoreJobHooksServer(server, &JobHookImpl{
Client: c.Client,
ClusterObjectKey: c.ClusterObjectKey,
BackupToRestore: c.BackupToRestoreObjectKey,
SpoolDirectory: c.SpoolDirectory,
PgDataPath: c.PGDataPath,
PgWalFolderToSymlink: PgWalVolumePgWalPath,
ArchiveConfiguration: c.ArchiveConfiguration,
})
return nil
}
srv := http.Server{
IdentityImpl: IdentityImplementation{},
Enrichers: []http.ServerEnricher{enrich},
PluginPath: c.PluginPath,
}
return srv.Start(ctx)
}

View File

@ -37,6 +37,7 @@ type ObjectStoreReconciler struct {
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;patch;update;get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;patch;update;get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;list;get;watch;delete
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=backups,verbs=get;list;watch
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=barmancloud.cnpg.io,resources=objectstores/finalizers,verbs=update

View File

@ -18,6 +18,7 @@ spec:
serviceAccountName: plugin-barman-cloud
containers:
- image: plugin-barman-cloud:latest
imagePullPolicy: IfNotPresent
name: barman-cloud
ports:
- containerPort: 9090
@ -29,11 +30,13 @@ spec:
key: SIDECAR_IMAGE
name: plugin-barman-cloud
args:
- operator
- --server-cert=/server/tls.crt
- --server-key=/server/tls.key
- --client-cert=/client/tls.crt
- --server-address=:9090
- --leader-elect
- --log-level=debug
volumeMounts:
- mountPath: /server
name: server

View File

@ -11,9 +11,9 @@ resources:
- ../config/rbac
images:
- name: plugin-barman-cloud
newName: kind.local/github.com/cloudnative-pg/plugin-barman-cloud/cmd/operator
newTag: 7e901b38eaf33b047dcf2eb044c9c8ca85535d8041a3144d25f7e1a4690ea071
# result of kind load docker-image
newName: docker.io/library/plugin-barman-cloud
secretGenerator:
- literals:
- SIDECAR_IMAGE=kind.local/github.com/cloudnative-pg/plugin-barman-cloud/cmd/instance:ca1fd58413940a247bc52cdb44f4a6909192d781b1767dc7ee9625368ee9d7e2
- SIDECAR_IMAGE=docker.io/library/plugin-barman-cloud
name: plugin-barman-cloud