feat: retention policy (#191)

This commit adds retention policy enforcement to the Barman cloud
plugin, relying on the barman-cloud tool suite.

The first recoverability point and the last successful backup are
shown in the status of the ObjectStore resource for each involved
server name.
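
As an illustration (a hypothetical manifest: the store name and values are
examples, and the barman-cloud configuration is abridged), the policy can be
enabled directly on the ObjectStore resource:

apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: my-store
spec:
  configuration:
    destinationPath: s3://backups/
    # credentials, endpoint, etc. omitted
  retentionPolicy: "30d"
  instanceSidecarConfiguration:
    retentionPolicyIntervalSeconds: 1800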

Signed-off-by: Leonardo Cecchi <leonardo.cecchi@enterprisedb.com>
Signed-off-by: Armando Ruocco <armando.ruocco@enterprisedb.com>
Signed-off-by: Marco Nenciarini <marco.nenciarini@enterprisedb.com>
Co-authored-by: Armando Ruocco <armando.ruocco@enterprisedb.com>
Co-authored-by: Marco Nenciarini <marco.nenciarini@enterprisedb.com>
Leonardo Cecchi 2025-03-18 17:35:22 +01:00 committed by GitHub
parent f52007914f
commit fecd1e9513
15 changed files with 572 additions and 49 deletions


@ -27,20 +27,48 @@ type InstanceSidecarConfiguration struct {
// The environment to be explicitly passed to the sidecar
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
// RetentionPolicyIntervalSeconds defines the frequency, in seconds, at
// which the system checks and enforces retention policies.
// +kubebuilder:default:=1800
// +optional
RetentionPolicyIntervalSeconds int `json:"retentionPolicyIntervalSeconds,omitempty"`
}
// ObjectStoreSpec defines the desired state of ObjectStore.
type ObjectStoreSpec struct {
// The configuration for the barman-cloud tool suite
Configuration barmanapi.BarmanObjectStoreConfiguration `json:"configuration"`
// RetentionPolicy is the retention policy to be used for backups
// and WALs (e.g. '60d'). The retention policy is expressed in the form
// of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
// days, weeks, months.
// +kubebuilder:validation:Pattern=^[1-9][0-9]*[dwm]$
// +optional
RetentionPolicy string `json:"retentionPolicy,omitempty"`
// +optional
InstanceSidecarConfiguration InstanceSidecarConfiguration `json:"instanceSidecarConfiguration,omitempty"`
}
// ObjectStoreStatus defines the observed state of ObjectStore.
type ObjectStoreStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
// ServerRecoveryWindow maps each server to its recovery window
ServerRecoveryWindow map[string]RecoveryWindow `json:"serverRecoveryWindow,omitempty"`
}
// RecoveryWindow represents the time span between the first
// recoverability point and the last successful backup of a PostgreSQL
// server, defining the period during which data can be restored.
type RecoveryWindow struct {
// The first recoverability point in a PostgreSQL server refers to
// the earliest point in time to which the database can be
// restored.
FirstRecoverabilityPoint *metav1.Time `json:"firstRecoverabilityPoint,omitempty"`
// The last successful backup time
LastSuccessfulBackupTime *metav1.Time `json:"lastSuccessfulBackupTime,omitempty"`
}
// +kubebuilder:object:root=true
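
Once the retention maintenance has run, the status of an ObjectStore is
expected to look roughly like the following sketch (server name and
timestamps are illustrative):

status:
  serverRecoveryWindow:
    cluster-example:
      firstRecoverabilityPoint: "2025-03-01T10:00:00Z"
      lastSuccessfulBackupTime: "2025-03-18T02:00:00Z"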


@ -53,7 +53,7 @@ func (in *ObjectStore) DeepCopyInto(out *ObjectStore) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStore.
@ -126,6 +126,13 @@ func (in *ObjectStoreSpec) DeepCopy() *ObjectStoreSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectStoreStatus) DeepCopyInto(out *ObjectStoreStatus) {
*out = *in
if in.ServerRecoveryWindow != nil {
in, out := &in.ServerRecoveryWindow, &out.ServerRecoveryWindow
*out = make(map[string]RecoveryWindow, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreStatus.
@ -137,3 +144,26 @@ func (in *ObjectStoreStatus) DeepCopy() *ObjectStoreStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecoveryWindow) DeepCopyInto(out *RecoveryWindow) {
*out = *in
if in.FirstRecoverabilityPoint != nil {
in, out := &in.FirstRecoverabilityPoint, &out.FirstRecoverabilityPoint
*out = (*in).DeepCopy()
}
if in.LastSuccessfulBackupTime != nil {
in, out := &in.LastSuccessfulBackupTime, &out.LastSuccessfulBackupTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecoveryWindow.
func (in *RecoveryWindow) DeepCopy() *RecoveryWindow {
if in == nil {
return nil
}
out := new(RecoveryWindow)
in.DeepCopyInto(out)
return out
}


@ -40,9 +40,7 @@ spec:
description: ObjectStoreSpec defines the desired state of ObjectStore.
properties:
configuration:
description: |-
BarmanObjectStoreConfiguration contains the backup configuration
using Barman against an S3-compatible object storage
description: The configuration for the barman-cloud tool suite
properties:
azureCredentials:
description: The credentials to use to upload data to Azure Blob
@ -502,12 +500,49 @@ spec:
- name
type: object
type: array
retentionPolicyIntervalSeconds:
default: 1800
description: |-
RetentionPolicyIntervalSeconds defines the frequency, in seconds, at
which the system checks and enforces retention policies.
type: integer
type: object
retentionPolicy:
description: |-
RetentionPolicy is the retention policy to be used for backups
and WALs (e.g. '60d'). The retention policy is expressed in the form
of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
days, weeks, months.
pattern: ^[1-9][0-9]*[dwm]$
type: string
required:
- configuration
type: object
status:
description: ObjectStoreStatus defines the observed state of ObjectStore.
properties:
serverRecoveryWindow:
additionalProperties:
description: |-
RecoveryWindow represents the time span between the first
recoverability point and the last successful backup of a PostgreSQL
server, defining the period during which data can be restored.
properties:
firstRecoverabilityPoint:
description: |-
The first recoverability point in a PostgreSQL server refers to
the earliest point in time to which the database can be
restored.
format: date-time
type: string
lastSuccessfulBackupTime:
description: The last successful backup time
format: date-time
type: string
type: object
description: ServerRecoveryWindow maps each server to its recovery
window
type: object
type: object
required:
- metadata


@ -0,0 +1,29 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cluster-example
spec:
instances: 3
imagePullPolicy: Always
backup:
barmanObjectStore:
destinationPath: s3://backups/
endpointURL: http://minio:9000
s3Credentials:
accessKeyId:
name: minio
key: ACCESS_KEY_ID
secretAccessKey:
name: minio
key: ACCESS_SECRET_KEY
wal:
compression: gzip
data:
additionalCommandArgs:
- "--min-chunk-size=5MB"
- "--read-timeout=60"
- "-vv"
storage:
size: 1Gi


@ -3,6 +3,9 @@ kind: ObjectStore
metadata:
name: minio-store
spec:
retentionPolicy: "1m"
instanceSidecarConfiguration:
retentionPolicyIntervalSeconds: 30
configuration:
endpointCA:
name: minio-server-tls

hack/build-dev-image.sh Executable file

@ -0,0 +1,14 @@
#!/usr/bin/env sh
# This script builds the images of the barman cloud plugin, to be used
# to quickly test images in a development environment.
#
# After each run, the built images will have these names:
#
# - `plugin-barman-cloud:dev`
# - `plugin-barman-cloud-sidecar:dev`
set -eu
docker build -t plugin-barman-cloud:dev --file containers/Dockerfile.plugin .
docker build -t plugin-barman-cloud-sidecar:dev --file containers/Dockerfile.sidecar .


@ -18,6 +18,7 @@ func NewCmd() *cobra.Command {
RunE: func(cmd *cobra.Command, _ []string) error {
requiredSettings := []string{
"namespace",
"cluster-name",
"pod-name",
"spool-directory",
}
@ -33,6 +34,7 @@ func NewCmd() *cobra.Command {
}
_ = viper.BindEnv("namespace", "NAMESPACE")
_ = viper.BindEnv("cluster-name", "CLUSTER_NAME")
_ = viper.BindEnv("pod-name", "POD_NAME")
_ = viper.BindEnv("pgdata", "PGDATA")
_ = viper.BindEnv("spool-directory", "SPOOL_DIRECTORY")


@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
"strconv"
"time"
barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup"
@ -20,7 +19,6 @@ import (
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
)
@ -148,11 +146,6 @@ func (b BackupServiceImplementation) Backup(
EndLsn: executedBackupInfo.EndLSN,
InstanceId: b.InstanceName,
Online: true,
Metadata: map[string]string{
"timeline": strconv.Itoa(executedBackupInfo.TimeLine),
"version": metadata.Data.Version,
"name": metadata.Data.Name,
"displayName": metadata.Data.DisplayName,
},
Metadata: newBackupResultMetadata(configuration.Cluster.ObjectMeta.UID, executedBackupInfo.TimeLine).toMap(),
}, nil
}


@ -5,15 +5,16 @@ import (
"path"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/scheme"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
@ -26,7 +27,10 @@ func Start(ctx context.Context) error {
setupLog := log.FromContext(ctx)
setupLog.Info("Starting barman cloud instance plugin")
podName := viper.GetString("pod-name")
clusterName := viper.GetString("cluster-name")
namespace := viper.GetString("namespace")
controllerOptions := ctrl.Options{
Scheme: scheme,
@ -36,6 +40,7 @@ func Start(ctx context.Context) error {
&corev1.Secret{},
&barmancloudv1.ObjectStore{},
&cnpgv1.Cluster{},
&cnpgv1.Backup{},
},
},
},
@ -47,10 +52,11 @@ func Start(ctx context.Context) error {
return err
}
customCacheClient := extendedclient.NewExtendedClient(mgr.GetClient())
if err := mgr.Add(&CNPGI{
Client: extendedclient.NewExtendedClient(mgr.GetClient()),
InstanceName: podName,
// TODO: improve
Client: customCacheClient,
InstanceName: podName,
PGDataPath: viper.GetString("pgdata"),
PGWALPath: path.Join(viper.GetString("pgdata"), "pg_wal"),
SpoolDirectory: viper.GetString("spool-directory"),
@ -60,6 +66,19 @@ func Start(ctx context.Context) error {
return err
}
if err := mgr.Add(&CatalogMaintenanceRunnable{
Client: customCacheClient,
Recorder: mgr.GetEventRecorderFor("policy-runnable"),
ClusterKey: types.NamespacedName{
Namespace: namespace,
Name: clusterName,
},
CurrentPodName: podName,
}); err != nil {
setupLog.Error(err, "unable to policy enforcement runnable")
return err
}
if err := mgr.Start(ctx); err != nil {
return err
}


@ -0,0 +1,265 @@
package instance
import (
"context"
"fmt"
"os"
"slices"
"time"
"github.com/cloudnative-pg/barman-cloud/pkg/catalog"
barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/machinery/pkg/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/common"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/operator/config"
)
// defaultRetentionPolicyInterval is the retention policy interval
// used when the current cluster or barman object store can't
// be read or when the enforcement process fails
const defaultRetentionPolicyInterval = time.Minute * 5
// CatalogMaintenanceRunnable executes all the barman catalog maintenance operations
type CatalogMaintenanceRunnable struct {
Client client.Client
Recorder record.EventRecorder
ClusterKey types.NamespacedName
CurrentPodName string
}
// Start enforces the backup retention policies periodically, using the
// period specified in the ObjectStore object
func (c *CatalogMaintenanceRunnable) Start(ctx context.Context) error {
contextLogger := log.FromContext(ctx)
contextLogger.Info("Starting retention policy runnable")
for {
// Enforce the retention policies
period, err := c.cycle(ctx)
if err != nil {
contextLogger.Error(err, "Retention policy enforcement failed")
}
if period == 0 {
period = defaultRetentionPolicyInterval
}
select {
case <-time.After(period):
case <-ctx.Done():
return nil
}
}
}
// cycle enforces the retention policies. On success, it returns the amount
// of time to wait before the next check.
func (c *CatalogMaintenanceRunnable) cycle(ctx context.Context) (time.Duration, error) {
var cluster cnpgv1.Cluster
var barmanObjectStore barmancloudv1.ObjectStore
if err := c.Client.Get(ctx, c.ClusterKey, &cluster); err != nil {
return 0, err
}
configuration := config.NewFromCluster(&cluster)
if err := c.Client.Get(ctx, configuration.GetBarmanObjectKey(), &barmanObjectStore); err != nil {
return 0, err
}
if err := c.maintenance(ctx, &cluster, &barmanObjectStore); err != nil {
return 0, err
}
nextCheckInterval := time.Second * time.Duration(
barmanObjectStore.Spec.InstanceSidecarConfiguration.RetentionPolicyIntervalSeconds)
return nextCheckInterval, nil
}
// maintenance executes a collection of operations:
//
// - applies the retention policy to the object store.
//
// - deletes the stale Kubernetes Backup objects.
//
// - updates the recovery window in the object store status.
func (c *CatalogMaintenanceRunnable) maintenance(
ctx context.Context,
cluster *cnpgv1.Cluster,
objectStore *barmancloudv1.ObjectStore,
) error {
contextLogger := log.FromContext(ctx)
configuration := config.NewFromCluster(cluster)
retentionPolicy := objectStore.Spec.RetentionPolicy
if cluster.Status.CurrentPrimary != c.CurrentPodName {
contextLogger.Info(
"Skipping retention policy enforcement, not the current primary",
"currentPrimary", cluster.Status.CurrentPrimary, "podName", c.CurrentPodName)
return nil
}
env, err := barmanCredentials.EnvSetBackupCloudCredentials(
ctx,
c.Client,
objectStore.Namespace,
&objectStore.Spec.Configuration,
common.MergeEnv(os.Environ(), common.GetRestoreCABundleEnv(&objectStore.Spec.Configuration)))
if err != nil {
contextLogger.Error(err, "while setting backup cloud credentials")
return err
}
if len(retentionPolicy) == 0 {
contextLogger.Info("Skipping retention policy enforcement, no retention policy specified")
} else {
contextLogger.Info("Applying backup retention policy",
"retentionPolicy", retentionPolicy)
if err := barmanCommand.DeleteBackupsByPolicy(
ctx,
&objectStore.Spec.Configuration,
configuration.ServerName,
env,
retentionPolicy,
); err != nil {
contextLogger.Error(err, "while enforcing retention policies")
c.Recorder.Event(cluster, "Warning", "RetentionPolicyFailed", "Retention policy failed")
return err
}
}
backupList, err := barmanCommand.GetBackupList(
ctx,
&objectStore.Spec.Configuration,
configuration.ServerName,
env,
)
if err != nil {
contextLogger.Error(err, "while reading the backup list")
return err
}
if err := deleteBackupsNotInCatalog(ctx, c.Client, cluster, backupList.GetBackupIDs()); err != nil {
contextLogger.Error(err, "while deleting Backups not present in the catalog")
return err
}
return c.updateRecoveryWindow(ctx, backupList, objectStore, configuration.ServerName)
}
// updateRecoveryWindow updates the recovery window inside the object
// store status subresource
func (c *CatalogMaintenanceRunnable) updateRecoveryWindow(
ctx context.Context,
backupList *catalog.Catalog,
objectStore *barmancloudv1.ObjectStore,
serverName string,
) error {
// Set the recovery window inside the barman object store object
convertTime := func(t *time.Time) *metav1.Time {
if t == nil {
return nil
}
return ptr.To(metav1.NewTime(*t))
}
recoveryWindow := barmancloudv1.RecoveryWindow{
FirstRecoverabilityPoint: convertTime(backupList.GetFirstRecoverabilityPoint()),
LastSuccessfulBackupTime: convertTime(backupList.GetLastSuccessfulBackupTime()),
}
if objectStore.Status.ServerRecoveryWindow == nil {
objectStore.Status.ServerRecoveryWindow = make(map[string]barmancloudv1.RecoveryWindow)
}
objectStore.Status.ServerRecoveryWindow[serverName] = recoveryWindow
return c.Client.Status().Update(ctx, objectStore)
}
// deleteBackupsNotInCatalog deletes all Backup objects pointing to the given cluster that are not
// present in the backup catalog anymore
func deleteBackupsNotInCatalog(
ctx context.Context,
cli client.Client,
cluster *cnpgv1.Cluster,
backupIDs []string,
) error {
// We had two options:
//
// A. quicker
// get policy checker function
// get all backups in the namespace for this cluster
// check with policy checker function if backup should be deleted, then delete it if true
//
// B. more precise
// get the catalog (GetBackupList)
// get all backups in the namespace for this cluster
// go through all backups and delete them if not in the catalog
//
// 1: all backups in the bucket should also be in the cluster
// 2: all backups in the cluster should be in the bucket
//
// A can violate 1 and 2
// A + B can still violate 2
// B satisfies 1 and 2
//
// We chose to go with B
contextLogger := log.FromContext(ctx)
contextLogger.Debug("Checking the catalog to delete backups not present anymore")
backups := cnpgv1.BackupList{}
if err := cli.List(ctx, &backups, client.InNamespace(cluster.GetNamespace())); err != nil {
return fmt.Errorf("while getting backups: %w", err)
}
var errors []error
for id, backup := range backups.Items {
if backup.Spec.Cluster.Name != cluster.GetName() ||
backup.Status.Phase != cnpgv1.BackupPhaseCompleted ||
!useSameBackupLocation(&backup.Status, cluster) {
continue
}
// here we could add further checks, e.g. if the backup is not found but would still
// be within the retention policy we could either not delete it or update its status
if !slices.Contains(backupIDs, backup.Status.BackupID) {
contextLogger.Info("Deleting backup not in the catalog", "backup", backup.Name)
if err := cli.Delete(ctx, &backups.Items[id]); err != nil {
errors = append(errors, fmt.Errorf(
"while deleting backup %s/%s: %w",
backup.Namespace,
backup.Name,
err,
))
}
}
}
if len(errors) > 0 {
return fmt.Errorf("got errors while deleting Backups not in the cluster: %v", errors)
}
return nil
}
// useSameBackupLocation checks whether the given backup was taken by this plugin for the given cluster
func useSameBackupLocation(backup *cnpgv1.BackupStatus, cluster *cnpgv1.Cluster) bool {
if backup.Method != cnpgv1.BackupMethodPlugin {
return false
}
meta := newBackupResultMetadataFromMap(backup.PluginMetadata)
return meta.clusterUID == string(cluster.UID) && meta.pluginName == metadata.PluginName
}


@ -0,0 +1,56 @@
package instance
import (
"strconv"
"k8s.io/apimachinery/pkg/types"
"github.com/cloudnative-pg/plugin-barman-cloud/internal/cnpgi/metadata"
)
type backupResultMetadata struct {
timeline string
version string
name string
displayName string
clusterUID string
pluginName string
}
func (b backupResultMetadata) toMap() map[string]string {
return map[string]string{
"timeline": b.timeline,
"version": b.version,
"name": b.name,
"displayName": b.displayName,
"clusterUID": b.clusterUID,
"pluginName": b.pluginName,
}
}
func newBackupResultMetadata(clusterUID types.UID, timeline int) backupResultMetadata {
return backupResultMetadata{
timeline: strconv.Itoa(timeline),
clusterUID: string(clusterUID),
// static values
version: metadata.Data.Version,
name: metadata.Data.Name,
displayName: metadata.Data.DisplayName,
pluginName: metadata.PluginName,
}
}
func newBackupResultMetadataFromMap(m map[string]string) backupResultMetadata {
if m == nil {
return backupResultMetadata{}
}
return backupResultMetadata{
timeline: m["timeline"],
version: m["version"],
name: m["name"],
displayName: m["displayName"],
clusterUID: m["clusterUID"],
pluginName: m["pluginName"],
}
}
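
On a completed Backup resource this map is stored in the plugin metadata of
the backup status, roughly as in the sketch below (version, names, and UID
are made-up values; the real ones come from the metadata package and the
cluster object):

pluginMetadata:
  timeline: "1"
  version: "0.5.0"
  name: "barman-cloud.cloudnative-pg.io"
  displayName: "Barman Cloud"
  clusterUID: "2a9e7c3f-0d7b-4f6a-9b1c-3f2a5d8e4c10"
  pluginName: "barman-cloud.cloudnative-pg.io"

This is the information useSameBackupLocation matches against the cluster
UID and plugin name when deciding whether a Backup belongs to the catalog
being maintained.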


@ -35,35 +35,49 @@ func BuildRole(
}
}
role.Rules = append(role.Rules, rbacv1.PolicyRule{
APIGroups: []string{
"barmancloud.cnpg.io",
role.Rules = append(
role.Rules,
rbacv1.PolicyRule{
APIGroups: []string{
"barmancloud.cnpg.io",
},
Verbs: []string{
"get",
"watch",
"list",
},
Resources: []string{
"objectstores",
},
ResourceNames: barmanObjectsSet.ToSortedList(),
},
Verbs: []string{
"get",
"watch",
"list",
rbacv1.PolicyRule{
APIGroups: []string{
"barmancloud.cnpg.io",
},
Verbs: []string{
"update",
},
Resources: []string{
"objectstores/status",
},
ResourceNames: barmanObjectsSet.ToSortedList(),
},
Resources: []string{
"objectstores",
rbacv1.PolicyRule{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"get",
"watch",
"list",
},
ResourceNames: secretsSet.ToSortedList(),
},
ResourceNames: barmanObjectsSet.ToSortedList(),
})
role.Rules = append(role.Rules, rbacv1.PolicyRule{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"get",
"watch",
"list",
},
ResourceNames: secretsSet.ToSortedList(),
})
)
return role
}
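
The additional rule introduced above grants the sidecar the permission it
needs to write the recovery window, roughly equivalent to this generated
Role entry (a sketch; resourceNames is filled with the object stores
referenced by the cluster, e.g. the minio-store sample):

- apiGroups:
    - barmancloud.cnpg.io
  resources:
    - objectstores/status
  verbs:
    - update
  resourceNames:
    - minio-store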


@ -4,6 +4,7 @@ import (
"context"
cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -11,7 +12,6 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)


@ -20,10 +20,10 @@ import (
"context"
"fmt"
"github.com/cloudnative-pg/machinery/pkg/log"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
barmancloudv1 "github.com/cloudnative-pg/plugin-barman-cloud/api/v1"
)


@ -39,9 +39,7 @@ spec:
description: ObjectStoreSpec defines the desired state of ObjectStore.
properties:
configuration:
description: |-
BarmanObjectStoreConfiguration contains the backup configuration
using Barman against an S3-compatible object storage
description: The configuration for the barman-cloud tool suite
properties:
azureCredentials:
description: The credentials to use to upload data to Azure Blob
@ -501,12 +499,49 @@ spec:
- name
type: object
type: array
retentionPolicyIntervalSeconds:
default: 1800
description: |-
RetentionPolicyIntervalSeconds defines the frequency, in seconds, at
which the system checks and enforces retention policies.
type: integer
type: object
retentionPolicy:
description: |-
RetentionPolicy is the retention policy to be used for backups
and WALs (e.g. '60d'). The retention policy is expressed in the form
of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
days, weeks, months.
pattern: ^[1-9][0-9]*[dwm]$
type: string
required:
- configuration
type: object
status:
description: ObjectStoreStatus defines the observed state of ObjectStore.
properties:
serverRecoveryWindow:
additionalProperties:
description: |-
RecoveryWindow represents the time span between the first
recoverability point and the last successful backup of a PostgreSQL
server, defining the period during which data can be restored.
properties:
firstRecoverabilityPoint:
description: |-
The first recoverability point in a PostgreSQL server refers to
the earliest point in time to which the database can be
restored.
format: date-time
type: string
lastSuccessfulBackupTime:
description: The last successful backup time
format: date-time
type: string
type: object
description: ServerRecoveryWindow maps each server to its recovery
window
type: object
type: object
required:
- metadata