From 1003bd56f088c180837e8aae1a0ed2b5766d0c80 Mon Sep 17 00:00:00 2001 From: Remco Beckers Date: Tue, 24 Feb 2026 12:25:25 +0100 Subject: [PATCH 1/4] STAC-23466 Restore backup from local settings bucket Keeps support for the old PVC if configured. --- cmd/cmdutils/common.go | 15 ++-- cmd/settings/list.go | 78 +++++++++++++++--- cmd/settings/restore.go | 32 +++++--- cmd/stackgraph/check_and_finalize.go | 2 +- cmd/stackgraph/list.go | 11 +-- cmd/stackgraph/restore.go | 19 +++-- cmd/victoriametrics/check_and_finalize.go | 2 +- cmd/victoriametrics/list.go | 11 +-- cmd/victoriametrics/restore.go | 18 ++-- internal/app/app.go | 7 +- internal/foundation/config/config.go | 82 +++++++++++++++++-- internal/orchestration/restore/resources.go | 20 +++-- .../scripts/restore-settings-backup.sh | 46 +++++++++-- 13 files changed, 257 insertions(+), 86 deletions(-) diff --git a/cmd/cmdutils/common.go b/cmd/cmdutils/common.go index 98d5a1e..d0e5772 100644 --- a/cmd/cmdutils/common.go +++ b/cmd/cmdutils/common.go @@ -9,18 +9,23 @@ import ( ) const ( - MinioIsRequired bool = true - MinioIsNotRequired bool = false + StorageIsRequired bool = true + StorageIsNotRequired bool = false + + // MinioIsRequired is deprecated: use StorageIsRequired instead + MinioIsRequired = StorageIsRequired + // MinioIsNotRequired is deprecated: use StorageIsNotRequired instead + MinioIsNotRequired = StorageIsNotRequired ) -func Run(globalFlags *config.CLIGlobalFlags, runFunc func(ctx *app.Context) error, minioRequired bool) { +func Run(globalFlags *config.CLIGlobalFlags, runFunc func(ctx *app.Context) error, storageRequired bool) { appCtx, err := app.NewContext(globalFlags) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "❌ Error: %v\n", err) os.Exit(1) } - if minioRequired && !appCtx.Config.Minio.Enabled { - appCtx.Logger.Errorf("commands that interact with Minio require SUSE Observability to be deployed with .Values.global.backup.enabled=true") + if storageRequired && !appCtx.Config.StorageEnabled() { + 
appCtx.Logger.Errorf("commands that interact with S3-compatible storage require SUSE Observability to be deployed with .Values.global.backup.enabled=true") os.Exit(1) } if err := runFunc(appCtx); err != nil { diff --git a/cmd/settings/list.go b/cmd/settings/list.go index 265e0d5..9c2be55 100644 --- a/cmd/settings/list.go +++ b/cmd/settings/list.go @@ -68,24 +68,34 @@ func runList(appCtx *app.Context) error { return appCtx.Formatter.PrintTable(table) } -// getAllBackups retrieves backups from all sources (S3 and PVC), deduplicates and sorts them by LastModified time (most recent first) +// getAllBackups retrieves backups from all sources, deduplicates and sorts them by LastModified time (most recent first). +// In legacy mode (Minio): combines S3 backups (if Minio enabled) + PVC backups. +// In new mode (Storage): combines S3 backups + local bucket backups (from settings.localBucket). func getAllBackups(appCtx *app.Context) ([]BackupFileInfo, error) { var backups []BackupFileInfo var err error - // Get backups from S3 if enabled - if appCtx.Config.Minio.Enabled { + // Get backups from S3 if storage is enabled + if appCtx.Config.StorageEnabled() { if backups, err = getBackupListFromS3(appCtx); err != nil { - return nil, fmt.Errorf("failed to get list of backups from Minio: %v", err) + return nil, fmt.Errorf("failed to get list of backups from S3 storage: %v", err) } } - // Get backups from PVC - backupsFromPvc, err := getBackupListFromPVC(appCtx) - if err != nil { - return nil, fmt.Errorf("failed to get list of backups from PVC: %v", err) + // Get local backups: from PVC in legacy mode, from localBucket in new mode + var localBackups []BackupFileInfo + if appCtx.Config.IsLegacyMode() { + localBackups, err = getBackupListFromPVC(appCtx) + if err != nil { + return nil, fmt.Errorf("failed to get list of backups from PVC: %v", err) + } + } else { + localBackups, err = getBackupListFromLocalBucket(appCtx) + if err != nil { + return nil, fmt.Errorf("failed to get list of 
backups from local bucket: %v", err) + } } - backups = append(backups, backupsFromPvc...) + backups = append(backups, localBackups...) if len(backups) == 0 { return []BackupFileInfo{}, nil @@ -117,10 +127,11 @@ type BackupFileInfo struct { } func getBackupListFromS3(appCtx *app.Context) ([]BackupFileInfo, error) { - // Setup port-forward to Minio - serviceName := appCtx.Config.Minio.Service.Name - localPort := appCtx.Config.Minio.Service.LocalPortForwardPort - remotePort := appCtx.Config.Minio.Service.Port + // Setup port-forward to S3-compatible storage + storageService := appCtx.Config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port pf, err := portforward.SetupPortForward(appCtx.K8sClient, appCtx.Namespace, serviceName, localPort, remotePort, appCtx.Logger) if err != nil { @@ -159,6 +170,47 @@ func getBackupListFromS3(appCtx *app.Context) ([]BackupFileInfo, error) { return backups, nil } +// getBackupListFromLocalBucket lists settings backups from the local S3 bucket (new mode). +// This replaces PVC-based listing when using the new storage configuration. 
+func getBackupListFromLocalBucket(appCtx *app.Context) ([]BackupFileInfo, error) { + // Setup port-forward to S3-compatible storage + storageService := appCtx.Config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port + + pf, err := portforward.SetupPortForward(appCtx.K8sClient, appCtx.Namespace, serviceName, localPort, remotePort, appCtx.Logger) + if err != nil { + return nil, err + } + defer close(pf.StopChan) + + bucket := appCtx.Config.Settings.LocalBucket + + appCtx.Logger.Infof("Listing local Settings backups in bucket '%s'...", bucket) + + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + } + + result, err := appCtx.S3Client.ListObjectsV2(context.Background(), input) + if err != nil { + return nil, fmt.Errorf("failed to list objects in local bucket: %w", err) + } + + filteredObjects := s3client.FilterBackupObjects(result.Contents, isMultiPartArchive) + + var backups []BackupFileInfo + for _, obj := range filteredObjects { + backups = append(backups, BackupFileInfo{ + Filename: obj.Key, + LastModified: obj.LastModified, + Size: obj.Size, + }) + } + return backups, nil +} + func getBackupListFromPVC(appCtx *app.Context) ([]BackupFileInfo, error) { // Setup Kubernetes resources for list job appCtx.Logger.Println() diff --git a/cmd/settings/restore.go b/cmd/settings/restore.go index 2bd9fb5..43633b1 100644 --- a/cmd/settings/restore.go +++ b/cmd/settings/restore.go @@ -182,34 +182,42 @@ func createRestoreJob(k8sClient *k8s.Client, namespace, jobName, backupFile stri // buildEnvVar constructs environment variables for the container spec func buildEnvVar(extraEnvVar []corev1.EnvVar, config *config.Config) []corev1.EnvVar { + storageService := config.GetStorageService() commonVar := []corev1.EnvVar{ {Name: "BACKUP_CONFIGURATION_BUCKET_NAME", Value: config.Settings.Bucket}, {Name: "BACKUP_CONFIGURATION_S3_PREFIX", Value: config.Settings.S3Prefix}, - {Name: 
"MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", config.Minio.Service.Name, config.Minio.Service.Port)}, + {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", storageService.Name, storageService.Port)}, {Name: "STACKSTATE_BASE_URL", Value: config.Settings.Restore.BaseURL}, {Name: "RECEIVER_BASE_URL", Value: config.Settings.Restore.ReceiverBaseURL}, {Name: "PLATFORM_VERSION", Value: config.Settings.Restore.PlatformVersion}, {Name: "ZOOKEEPER_QUORUM", Value: config.Settings.Restore.ZookeeperQuorum}, - {Name: "BACKUP_CONFIGURATION_UPLOAD_REMOTE", Value: strconv.FormatBool(config.Minio.Enabled)}, + {Name: "BACKUP_CONFIGURATION_UPLOAD_REMOTE", Value: strconv.FormatBool(config.StorageEnabled())}, + } + if config.Settings.LocalBucket != "" { + commonVar = append(commonVar, corev1.EnvVar{Name: "BACKUP_CONFIGURATION_LOCAL_BUCKET", Value: config.Settings.LocalBucket}) } commonVar = append(commonVar, extraEnvVar...) return commonVar } // buildVolumeMounts constructs volume mounts for the restore job container -func buildVolumeMounts() []corev1.VolumeMount { - return []corev1.VolumeMount{ +func buildVolumeMounts(config *config.Config) []corev1.VolumeMount { + mounts := []corev1.VolumeMount{ {Name: "backup-log", MountPath: "/opt/docker/etc_log"}, {Name: "backup-restore-scripts", MountPath: "/backup-restore-scripts"}, {Name: "minio-keys", MountPath: "/aws-keys"}, {Name: "tmp-data", MountPath: "/tmp-data"}, - {Name: "settings-backup-data", MountPath: "/settings-backup-data"}, } + // Only mount PVC in legacy mode + if config.IsLegacyMode() { + mounts = append(mounts, corev1.VolumeMount{Name: "settings-backup-data", MountPath: "/settings-backup-data"}) + } + return mounts } // buildVolumes constructs volumes for the restore job pod func buildVolumes(config *config.Config, defaultMode int32) []corev1.Volume { - return []corev1.Volume{ + volumes := []corev1.Volume{ { Name: "backup-log", VolumeSource: corev1.VolumeSource{ @@ -235,7 +243,7 @@ func buildVolumes(config *config.Config, 
defaultMode int32) []corev1.Volume { Name: "minio-keys", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: restore.MinioKeysSecretName, + SecretName: restore.StorageKeysSecretName, }, }, }, @@ -245,15 +253,19 @@ func buildVolumes(config *config.Config, defaultMode int32) []corev1.Volume { EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }, - { + } + // Only include PVC volume in legacy mode + if config.IsLegacyMode() { + volumes = append(volumes, corev1.Volume{ Name: "settings-backup-data", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: config.Settings.Restore.PVC, }, }, - }, + }) } + return volumes } // buildContainers constructs containers for the restore job @@ -266,6 +278,6 @@ func buildContainer(envVar []corev1.EnvVar, command []string, config *config.Con Command: command, Env: envVar, Resources: k8s.ConvertResources(config.Settings.Restore.Job.Resources), - VolumeMounts: buildVolumeMounts(), + VolumeMounts: buildVolumeMounts(config), } } diff --git a/cmd/stackgraph/check_and_finalize.go b/cmd/stackgraph/check_and_finalize.go index 2895eb0..194657e 100644 --- a/cmd/stackgraph/check_and_finalize.go +++ b/cmd/stackgraph/check_and_finalize.go @@ -31,7 +31,7 @@ Examples: # Wait for job completion and cleanup sts-backup stackgraph check-and-finalize --job stackgraph-restore-20250128t143000 --wait -n my-namespace`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.StorageIsRequired) }, } diff --git a/cmd/stackgraph/list.go b/cmd/stackgraph/list.go index 330fa8c..9f309c6 100644 --- a/cmd/stackgraph/list.go +++ b/cmd/stackgraph/list.go @@ -22,16 +22,17 @@ func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Use: "list", Short: "List available Stackgraph backups from S3/Minio", Run: func(_ *cobra.Command, _ []string) { - 
cmdutils.Run(globalFlags, runList, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runList, cmdutils.StorageIsRequired) }, } } func runList(appCtx *app.Context) error { - // Setup port-forward to Minio - serviceName := appCtx.Config.Minio.Service.Name - localPort := appCtx.Config.Minio.Service.LocalPortForwardPort - remotePort := appCtx.Config.Minio.Service.Port + // Setup port-forward to S3-compatible storage + storageService := appCtx.Config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port pf, err := portforward.SetupPortForward(appCtx.K8sClient, appCtx.Namespace, serviceName, localPort, remotePort, appCtx.Logger) if err != nil { diff --git a/cmd/stackgraph/restore.go b/cmd/stackgraph/restore.go index 1f61c42..157038d 100644 --- a/cmd/stackgraph/restore.go +++ b/cmd/stackgraph/restore.go @@ -43,7 +43,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "Restore Stackgraph from a backup archive", Long: `Restore Stackgraph data from a backup archive stored in S3/Minio. 
Can use --latest or --archive to specify which backup to restore.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runRestore, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, } @@ -144,10 +144,11 @@ func waitAndCleanupRestoreJob(k8sClient *k8s.Client, namespace, jobName string, // getLatestBackup retrieves the most recent backup from S3 func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Config, log *logger.Logger) (string, error) { - // Setup port-forward to Minio - serviceName := config.Minio.Service.Name - localPort := config.Minio.Service.LocalPortForwardPort - remotePort := config.Minio.Service.Port + // Setup port-forward to S3-compatible storage + storageService := config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port pf, err := portforward.SetupPortForward(k8sClient, namespace, serviceName, localPort, remotePort, log) if err != nil { @@ -157,7 +158,7 @@ func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Con // Create S3 client endpoint := fmt.Sprintf("http://localhost:%d", pf.LocalPort) - s3Client, err := s3client.NewClient(endpoint, config.Minio.AccessKey, config.Minio.SecretKey) + s3Client, err := s3client.NewClient(endpoint, config.GetStorageAccessKey(), config.GetStorageSecretKey()) if err != nil { return "", err } @@ -262,13 +263,14 @@ func createRestoreJob(k8sClient *k8s.Client, namespace, jobName, backupFile stri // buildRestoreEnvVars constructs environment variables for the restore job func buildRestoreEnvVars(backupFile string, config *config.Config) []corev1.EnvVar { + storageService := config.GetStorageService() return []corev1.EnvVar{ {Name: "BACKUP_FILE", Value: backupFile}, {Name: "FORCE_DELETE", Value: purgeStackgraphDataFlag}, {Name: "BACKUP_STACKGRAPH_BUCKET_NAME", Value: config.Stackgraph.Bucket}, {Name: 
"BACKUP_STACKGRAPH_S3_PREFIX", Value: config.Stackgraph.S3Prefix}, {Name: "BACKUP_STACKGRAPH_MULTIPART_ARCHIVE", Value: strconv.FormatBool(config.Stackgraph.MultipartArchive)}, - {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", config.Minio.Service.Name, config.Minio.Service.Port)}, + {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", storageService.Name, storageService.Port)}, {Name: "ZOOKEEPER_QUORUM", Value: config.Stackgraph.Restore.ZookeeperQuorum}, } } @@ -285,6 +287,7 @@ func buildRestoreVolumeMounts() []corev1.VolumeMount { // buildRestoreInitContainers constructs init containers for the restore job func buildRestoreInitContainers(config *config.Config) []corev1.Container { + storageService := config.GetStorageService() return []corev1.Container{ { Name: "wait", @@ -293,7 +296,7 @@ func buildRestoreInitContainers(config *config.Config) []corev1.Container { Command: []string{ "sh", "-c", - fmt.Sprintf("/entrypoint -c %s:%d -t 300", config.Minio.Service.Name, config.Minio.Service.Port), + fmt.Sprintf("/entrypoint -c %s:%d -t 300", storageService.Name, storageService.Port), }, SecurityContext: k8s.ConvertSecurityContext(config.Stackgraph.Restore.Job.ContainerSecurityContext), }, diff --git a/cmd/victoriametrics/check_and_finalize.go b/cmd/victoriametrics/check_and_finalize.go index a20b433..65a45f7 100644 --- a/cmd/victoriametrics/check_and_finalize.go +++ b/cmd/victoriametrics/check_and_finalize.go @@ -31,7 +31,7 @@ Examples: # Wait for job completion and cleanup sts-backup victoriametrics check-and-finalize --job victoriametrics-restore-20250128t143000 --wait -n my-namespace`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.StorageIsRequired) }, } diff --git a/cmd/victoriametrics/list.go b/cmd/victoriametrics/list.go index 839db9e..adad31c 100644 --- a/cmd/victoriametrics/list.go +++ b/cmd/victoriametrics/list.go @@ -27,16 
+27,17 @@ func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Use: "list", Short: "List available VictoriaMetrics backups from S3/Minio", Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runList, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runList, cmdutils.StorageIsRequired) }, } } func runList(appCtx *app.Context) error { - // Setup port-forward to Minio - serviceName := appCtx.Config.Minio.Service.Name - localPort := appCtx.Config.Minio.Service.LocalPortForwardPort - remotePort := appCtx.Config.Minio.Service.Port + // Setup port-forward to S3-compatible storage + storageService := appCtx.Config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port pf, err := portforward.SetupPortForward(appCtx.K8sClient, appCtx.Namespace, serviceName, localPort, remotePort, appCtx.Logger) if err != nil { diff --git a/cmd/victoriametrics/restore.go b/cmd/victoriametrics/restore.go index 231eb81..878cbf0 100644 --- a/cmd/victoriametrics/restore.go +++ b/cmd/victoriametrics/restore.go @@ -40,7 +40,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "Restore VictoriaMetrics from a backup archive", Long: `Restore VictoriaMetrics data from a backup archive stored in S3/Minio. 
Can use --latest or --archive to specify which backup to restore.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runRestore, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, } @@ -141,10 +141,11 @@ func waitAndCleanupRestoreJob(k8sClient *k8s.Client, namespace, jobName string, // getLatestBackup retrieves the most recent backup from S3 func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Config, log *logger.Logger) (string, error) { - // Setup port-forward to Minio - serviceName := config.Minio.Service.Name - localPort := config.Minio.Service.LocalPortForwardPort - remotePort := config.Minio.Service.Port + // Setup port-forward to S3-compatible storage + storageService := config.GetStorageService() + serviceName := storageService.Name + localPort := storageService.LocalPortForwardPort + remotePort := storageService.Port pf, err := portforward.SetupPortForward(k8sClient, namespace, serviceName, localPort, remotePort, log) if err != nil { @@ -154,7 +155,7 @@ func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Con // Create S3 client endpoint := fmt.Sprintf("http://localhost:%d", pf.LocalPort) - s3Client, err := s3client.NewClient(endpoint, config.Minio.AccessKey, config.Minio.SecretKey) + s3Client, err := s3client.NewClient(endpoint, config.GetStorageAccessKey(), config.GetStorageSecretKey()) if err != nil { return "", err } @@ -228,8 +229,9 @@ func createRestoreJob(k8sClient *k8s.Client, namespace, jobName, backupFile stri // buildRestoreEnvVars constructs environment variables for the restore job func buildRestoreEnvVars(config *config.Config) []corev1.EnvVar { + storageService := config.GetStorageService() return []corev1.EnvVar{ - {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", config.Minio.Service.Name, config.Minio.Service.Port)}, + {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", storageService.Name, storageService.Port)}, } } @@ 
-252,7 +254,7 @@ func buildRestoreInitContainers(config *config.Config) []corev1.Container { Command: []string{ "sh", "-c", - fmt.Sprintf("/entrypoint -c %s:%d -t 300", config.Minio.Service.Name, config.Minio.Service.Port), + fmt.Sprintf("/entrypoint -c %s:%d -t 300", config.GetStorageService().Name, config.GetStorageService().Port), }, }, } diff --git a/internal/app/app.go b/internal/app/app.go index 7cd1d94..6f61c6e 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -40,9 +40,10 @@ func NewContext(flags *config.CLIGlobalFlags) (*Context, error) { return nil, fmt.Errorf("failed to load configuration: %w", err) } - // Create S3 client - endpoint := fmt.Sprintf("http://localhost:%d", cfg.Minio.Service.LocalPortForwardPort) - s3Client, err := s3.NewClient(endpoint, cfg.Minio.AccessKey, cfg.Minio.SecretKey) + // Create S3 client using storage config (new mode) or minio config (legacy mode) + storageService := cfg.GetStorageService() + endpoint := fmt.Sprintf("http://localhost:%d", storageService.LocalPortForwardPort) + s3Client, err := s3.NewClient(endpoint, cfg.GetStorageAccessKey(), cfg.GetStorageSecretKey()) if err != nil { return nil, err } diff --git a/internal/foundation/config/config.go b/internal/foundation/config/config.go index 64e01d9..b19da10 100644 --- a/internal/foundation/config/config.go +++ b/internal/foundation/config/config.go @@ -18,13 +18,54 @@ import ( type Config struct { Kubernetes KubernetesConfig `yaml:"kubernetes"` Elasticsearch ElasticsearchConfig `yaml:"elasticsearch" validate:"required"` - Minio MinioConfig `yaml:"minio" validate:"required"` + Minio MinioConfig `yaml:"minio"` + Storage StorageConfig `yaml:"storage"` Stackgraph StackgraphConfig `yaml:"stackgraph" validate:"required"` Settings SettingsConfig `yaml:"settings" validate:"required"` VictoriaMetrics VictoriaMetricsConfig `yaml:"victoriaMetrics" validate:"required"` Clickhouse ClickhouseConfig `yaml:"clickhouse" validate:"required"` } +// IsLegacyMode returns true when 
the configuration uses the legacy Minio config. +// Legacy mode is detected by the presence of the Minio config with a non-empty service name. +func (c *Config) IsLegacyMode() bool { + return c.Minio.Service.Name != "" +} + +// StorageEnabled returns true when S3-compatible storage is available, +// either through legacy Minio (with Enabled=true) or new Storage config. +func (c *Config) StorageEnabled() bool { + if c.IsLegacyMode() { + return c.Minio.Enabled + } + return c.Storage.Service.Name != "" +} + +// GetStorageService returns the service config for the S3-compatible storage, +// using either Storage (new) or Minio (legacy) config. +func (c *Config) GetStorageService() ServiceConfig { + if c.IsLegacyMode() { + return c.Minio.Service + } + return c.Storage.Service +} + +// GetStorageAccessKey returns the access key for the S3-compatible storage. +func (c *Config) GetStorageAccessKey() string { + if c.IsLegacyMode() { + return c.Minio.AccessKey + } + return c.Storage.AccessKey +} + +// GetStorageSecretKey returns the secret key for the S3-compatible storage. 
+func (c *Config) GetStorageSecretKey() string { + if c.IsLegacyMode() { + return c.Minio.SecretKey + } + return c.Storage.SecretKey +} + // KubernetesConfig holds Kubernetes-wide configuration type KubernetesConfig struct { CommonLabels map[string]string `yaml:"commonLabels"` @@ -77,12 +118,19 @@ type ServiceConfig struct { LocalPortForwardPort int `yaml:"localPortForwardPort" validate:"required,min=1,max=65535"` } -// MinioConfig holds Minio-specific configuration +// MinioConfig holds Minio-specific configuration (legacy mode) type MinioConfig struct { Enabled bool `yaml:"enabled" validate:"boolean"` - Service ServiceConfig `yaml:"service" validate:"required"` - AccessKey string `yaml:"accessKey" validate:"required"` // From secret - SecretKey string `yaml:"secretKey" validate:"required"` // From secret + Service ServiceConfig `yaml:"service" validate:"omitempty"` + AccessKey string `yaml:"accessKey"` // From secret + SecretKey string `yaml:"secretKey"` // From secret +} + +// StorageConfig holds S3-compatible storage configuration (new mode, replaces Minio) +type StorageConfig struct { + Service ServiceConfig `yaml:"service" validate:"omitempty"` + AccessKey string `yaml:"accessKey"` // From secret + SecretKey string `yaml:"secretKey"` // From secret } // StackgraphConfig holds Stackgraph backup-specific configuration @@ -121,9 +169,10 @@ type StackgraphRestoreConfig struct { } type SettingsConfig struct { - Bucket string `yaml:"bucket" validate:"required"` - S3Prefix string `yaml:"s3Prefix"` - Restore SettingsRestoreConfig `yaml:"restore" validate:"required"` + Bucket string `yaml:"bucket" validate:"required"` + S3Prefix string `yaml:"s3Prefix"` + LocalBucket string `yaml:"localBucket"` + Restore SettingsRestoreConfig `yaml:"restore" validate:"required"` } type SettingsRestoreConfig struct { @@ -134,7 +183,7 @@ type SettingsRestoreConfig struct { PlatformVersion string `yaml:"platformVersion" validate:"required"` ZookeeperQuorum string `yaml:"zookeeperQuorum" 
validate:"required"` Job JobConfig `yaml:"job" validate:"required"` - PVC string `yaml:"pvc" validate:"required"` + PVC string `yaml:"pvc"` // Required only in legacy mode } // ClickhouseConfig holds Clickhouse-specific configuration @@ -338,6 +387,21 @@ func LoadConfig(clientset kubernetes.Interface, namespace, configMapName, secret return nil, fmt.Errorf("configuration validation failed: %w", err) } + // Custom validation: either minio or storage must be configured + if config.Minio.Service.Name == "" && config.Storage.Service.Name == "" { + return nil, fmt.Errorf("configuration validation failed: either 'minio' or 'storage' must be configured") + } + + // In legacy mode (minio), PVC is required for settings + if config.IsLegacyMode() && config.Settings.Restore.PVC == "" { + return nil, fmt.Errorf("configuration validation failed: settings.restore.pvc is required in legacy (minio) mode") + } + + // In new mode (storage), localBucket is required for settings + if !config.IsLegacyMode() && config.Settings.LocalBucket == "" { + return nil, fmt.Errorf("configuration validation failed: settings.localBucket is required in storage mode") + } + return config, nil } diff --git a/internal/orchestration/restore/resources.go b/internal/orchestration/restore/resources.go index d5128c4..b37f341 100644 --- a/internal/orchestration/restore/resources.go +++ b/internal/orchestration/restore/resources.go @@ -10,8 +10,10 @@ import ( ) const ( - // MinioKeysSecretName is the name of the secret containing Minio access/secret keys - MinioKeysSecretName = "suse-observability-backup-cli-minio-keys" //nolint:gosec // This is a Kubernetes secret name, not a credential + // StorageKeysSecretName is the name of the secret containing S3-compatible storage access/secret keys + StorageKeysSecretName = "suse-observability-backup-cli-minio-keys" //nolint:gosec // This is a Kubernetes secret name, not a credential + // MinioKeysSecretName is an alias for StorageKeysSecretName for backward 
compatibility + MinioKeysSecretName = StorageKeysSecretName // RestoreScriptsConfigMap is the name of the ConfigMap containing restore scripts RestoreScriptsConfigMap = "suse-observability-backup-cli-restore-scripts" ) @@ -41,19 +43,19 @@ func EnsureResources(k8sClient *k8s.Client, namespace string, config *config.Con } log.Successf("Backup scripts ConfigMap ready") - // Ensure Minio keys secret exists - log.Infof("Ensuring Minio keys secret exists...") + // Ensure storage keys secret exists (uses storage or minio credentials) + log.Infof("Ensuring storage keys secret exists...") secretData := map[string][]byte{ - "accesskey": []byte(config.Minio.AccessKey), - "secretkey": []byte(config.Minio.SecretKey), + "accesskey": []byte(config.GetStorageAccessKey()), + "secretkey": []byte(config.GetStorageSecretKey()), } secretLabels := k8s.MergeLabels(config.Kubernetes.CommonLabels, map[string]string{}) - if _, err := k8sClient.EnsureSecret(namespace, MinioKeysSecretName, secretData, secretLabels); err != nil { - return fmt.Errorf("failed to ensure Minio keys secret: %w", err) + if _, err := k8sClient.EnsureSecret(namespace, StorageKeysSecretName, secretData, secretLabels); err != nil { + return fmt.Errorf("failed to ensure storage keys secret: %w", err) } - log.Successf("Minio keys secret ready") + log.Successf("Storage keys secret ready") return nil } diff --git a/internal/scripts/scripts/restore-settings-backup.sh b/internal/scripts/scripts/restore-settings-backup.sh index ed6531f..9859454 100644 --- a/internal/scripts/scripts/restore-settings-backup.sh +++ b/internal/scripts/scripts/restore-settings-backup.sh @@ -4,22 +4,50 @@ set -Eeuo pipefail export BACKUP_DIR=/settings-backup-data export TMP_DIR=/tmp-data -RESTORE_FILE="${BACKUP_DIR}/${BACKUP_FILE}" - -if [ "$BACKUP_CONFIGURATION_UPLOAD_REMOTE" == "true" ] && [ ! 
-f "${RESTORE_FILE}" ]; then +setup_aws_credentials() { export AWS_ACCESS_KEY_ID AWS_ACCESS_KEY_ID="$(cat /aws-keys/accesskey)" export AWS_SECRET_ACCESS_KEY AWS_SECRET_ACCESS_KEY="$(cat /aws-keys/secretkey)" +} + +download_from_s3() { + local bucket="$1" + local prefix="$2" + local dest="$3" + echo "=== Downloading Settings backup \"${BACKUP_FILE}\" from bucket \"${bucket}\"..." + sts-toolbox aws s3 --endpoint "http://${MINIO_ENDPOINT}" --region minio cp "s3://${bucket}/${prefix}${BACKUP_FILE}" "${dest}/${BACKUP_FILE}" +} + +RESTORE_FILE="" + +if [ -n "${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}" ]; then + # New mode: no PVC, download from local bucket first, fall back to remote bucket + setup_aws_credentials + + if download_from_s3 "${BACKUP_CONFIGURATION_LOCAL_BUCKET}" "" "${TMP_DIR}"; then + RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" + elif [ "${BACKUP_CONFIGURATION_UPLOAD_REMOTE}" == "true" ]; then + echo "=== Backup not found in local bucket, trying remote bucket..." + if download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}"; then + RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" + fi + fi +else + # Legacy mode: check PVC first, fall back to remote bucket + RESTORE_FILE="${BACKUP_DIR}/${BACKUP_FILE}" + + if [ "$BACKUP_CONFIGURATION_UPLOAD_REMOTE" == "true" ] && [ ! -f "${RESTORE_FILE}" ]; then + setup_aws_credentials - echo "=== Downloading Settings backup \"${BACKUP_FILE}\" from bucket \"${BACKUP_CONFIGURATION_BUCKET_NAME}\"..." - sts-toolbox aws s3 --endpoint "http://${MINIO_ENDPOINT}" --region minio cp "s3://${BACKUP_CONFIGURATION_BUCKET_NAME}/${BACKUP_CONFIGURATION_S3_PREFIX}${BACKUP_FILE}" "${TMP_DIR}/${BACKUP_FILE}" - RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" + download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}" + RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" + fi fi -if [ ! -f "${RESTORE_FILE}" ]; then -echo "=== Backup file \"${RESTORE_FILE}\" not found, exiting..." 
-exit 1 +if [ -z "${RESTORE_FILE}" ] || [ ! -f "${RESTORE_FILE}" ]; then + echo "=== Backup file \"${BACKUP_FILE}\" not found, exiting..." + exit 1 fi echo "=== Restoring settings backup from \"${BACKUP_FILE}\"..." From 7a16944404378a8f5809f1b397220e54367b4a14 Mon Sep 17 00:00:00 2001 From: Remco Beckers Date: Tue, 24 Feb 2026 13:04:40 +0100 Subject: [PATCH 2/4] STAC-23446 Add support for migration --- cmd/settings/list.go | 23 ++++++++++++++++++- cmd/settings/restore.go | 19 +++++++++++---- .../scripts/restore-settings-backup.sh | 5 +++- 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/cmd/settings/list.go b/cmd/settings/list.go index 9c2be55..b789bc5 100644 --- a/cmd/settings/list.go +++ b/cmd/settings/list.go @@ -31,14 +31,21 @@ const ( expectedListJobContainerCount = 1 ) +// Shared flag for --from-pvc, used by both list and restore commands +var fromPVC bool + func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { - return &cobra.Command{ + cmd := &cobra.Command{ Use: "list", Short: "List available Settings backups from S3/Minio", Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runList, cmdutils.MinioIsNotRequired) }, } + + cmd.Flags().BoolVar(&fromPVC, "from-pvc", false, "List backups from legacy PVC instead of S3") + + return cmd } func runList(appCtx *app.Context) error { @@ -69,12 +76,26 @@ func runList(appCtx *app.Context) error { } // getAllBackups retrieves backups from all sources, deduplicates and sorts them by LastModified time (most recent first). +// When --from-pvc is set: only lists backups from the legacy PVC (requires settings.restore.pvc to be configured). // In legacy mode (Minio): combines S3 backups (if Minio enabled) + PVC backups. // In new mode (Storage): combines S3 backups + local bucket backups (from settings.localBucket). 
func getAllBackups(appCtx *app.Context) ([]BackupFileInfo, error) { var backups []BackupFileInfo var err error + // When --from-pvc is set, only list from the PVC + if fromPVC { + if appCtx.Config.Settings.Restore.PVC == "" { + return nil, fmt.Errorf("--from-pvc requires settings.restore.pvc to be configured") + } + appCtx.Logger.Infof("Listing backups from legacy PVC '%s'...", appCtx.Config.Settings.Restore.PVC) + pvcBackups, err := getBackupListFromPVC(appCtx) + if err != nil { + return nil, fmt.Errorf("failed to get list of backups from PVC: %v", err) + } + return pvcBackups, nil + } + // Get backups from S3 if storage is enabled if appCtx.Config.StorageEnabled() { if backups, err = getBackupListFromS3(appCtx); err != nil { diff --git a/cmd/settings/restore.go b/cmd/settings/restore.go index 43633b1..4371210 100644 --- a/cmd/settings/restore.go +++ b/cmd/settings/restore.go @@ -44,6 +44,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd.Flags().BoolVar(&useLatest, "latest", false, "Restore from the most recent backup") cmd.Flags().BoolVar(&background, "background", false, "Run restore job in background without waiting for completion") cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt") + cmd.Flags().BoolVar(&fromPVC, "from-pvc", false, "Restore backup from legacy PVC instead of S3") cmd.MarkFlagsMutuallyExclusive("archive", "latest") cmd.MarkFlagsOneRequired("archive", "latest") @@ -51,6 +52,11 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { } func runRestore(appCtx *app.Context) error { + // Validate --from-pvc: PVC must be configured + if fromPVC && appCtx.Config.Settings.Restore.PVC == "" { + return fmt.Errorf("--from-pvc requires settings.restore.pvc to be configured") + } + // Determine which archive to restore backupFile := archiveName if useLatest { @@ -193,7 +199,10 @@ func buildEnvVar(extraEnvVar []corev1.EnvVar, config *config.Config) []corev1.En {Name: 
"ZOOKEEPER_QUORUM", Value: config.Settings.Restore.ZookeeperQuorum}, {Name: "BACKUP_CONFIGURATION_UPLOAD_REMOTE", Value: strconv.FormatBool(config.StorageEnabled())}, } - if config.Settings.LocalBucket != "" { + if fromPVC { + // Force PVC mode in the shell script, suppress local bucket + commonVar = append(commonVar, corev1.EnvVar{Name: "BACKUP_RESTORE_FROM_PVC", Value: "true"}) + } else if config.Settings.LocalBucket != "" { commonVar = append(commonVar, corev1.EnvVar{Name: "BACKUP_CONFIGURATION_LOCAL_BUCKET", Value: config.Settings.LocalBucket}) } commonVar = append(commonVar, extraEnvVar...) @@ -208,8 +217,8 @@ func buildVolumeMounts(config *config.Config) []corev1.VolumeMount { {Name: "minio-keys", MountPath: "/aws-keys"}, {Name: "tmp-data", MountPath: "/tmp-data"}, } - // Only mount PVC in legacy mode - if config.IsLegacyMode() { + // Mount PVC in legacy mode or when --from-pvc is set + if config.IsLegacyMode() || fromPVC { mounts = append(mounts, corev1.VolumeMount{Name: "settings-backup-data", MountPath: "/settings-backup-data"}) } return mounts @@ -254,8 +263,8 @@ func buildVolumes(config *config.Config, defaultMode int32) []corev1.Volume { }, }, } - // Only include PVC volume in legacy mode - if config.IsLegacyMode() { + // Include PVC volume in legacy mode or when --from-pvc is set + if config.IsLegacyMode() || fromPVC { volumes = append(volumes, corev1.Volume{ Name: "settings-backup-data", VolumeSource: corev1.VolumeSource{ diff --git a/internal/scripts/scripts/restore-settings-backup.sh b/internal/scripts/scripts/restore-settings-backup.sh index 9859454..68b95c4 100644 --- a/internal/scripts/scripts/restore-settings-backup.sh +++ b/internal/scripts/scripts/restore-settings-backup.sh @@ -21,7 +21,10 @@ download_from_s3() { RESTORE_FILE="" -if [ -n "${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}" ]; then +if [ "${BACKUP_RESTORE_FROM_PVC:-}" == "true" ]; then + # --from-pvc mode: use legacy PVC directly, no S3 fallback + 
RESTORE_FILE="${BACKUP_DIR}/${BACKUP_FILE}" +elif [ -n "${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}" ]; then # New mode: no PVC, download from local bucket first, fall back to remote bucket setup_aws_credentials From 47b1bde2d35f7080ff662214217e2333cb9cc6c0 Mon Sep 17 00:00:00 2001 From: Remco Beckers Date: Thu, 26 Feb 2026 10:04:22 +0100 Subject: [PATCH 3/4] STAC-23466 Proper check for global backup enabled --- internal/foundation/config/config.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/foundation/config/config.go b/internal/foundation/config/config.go index b19da10..ee582cf 100644 --- a/internal/foundation/config/config.go +++ b/internal/foundation/config/config.go @@ -38,7 +38,7 @@ func (c *Config) StorageEnabled() bool { if c.IsLegacyMode() { return c.Minio.Enabled } - return c.Storage.Service.Name != "" + return c.Storage.GlobalBackupEnabled } // GetStorageService returns the service config for the S3-compatible storage, @@ -128,9 +128,10 @@ type MinioConfig struct { // StorageConfig holds S3-compatible storage configuration (new mode, replaces Minio) type StorageConfig struct { - Service ServiceConfig `yaml:"service" validate:"omitempty"` - AccessKey string `yaml:"accessKey"` // From secret - SecretKey string `yaml:"secretKey"` // From secret + GlobalBackupEnabled bool `yaml:"globalBackupEnabled" validate:"boolean"` + Service ServiceConfig `yaml:"service" validate:"omitempty"` + AccessKey string `yaml:"accessKey"` // From secret + SecretKey string `yaml:"secretKey"` // From secret } // StackgraphConfig holds Stackgraph backup-specific configuration From c9d6686754227065b2bc7d6023e310f53c64292f Mon Sep 17 00:00:00 2001 From: Remco Beckers Date: Thu, 5 Mar 2026 16:53:24 +0100 Subject: [PATCH 4/4] STAC-23466 Cleanup minio references where possible Add tests for new storage config --- cmd/clickhouse/check_and_finalize.go | 2 +- cmd/clickhouse/list.go | 2 +- cmd/clickhouse/restore.go | 2 +- cmd/cmdutils/common.go | 5 - 
cmd/elasticsearch/check_and_finalize.go | 2 +- cmd/elasticsearch/configure.go | 2 +- cmd/elasticsearch/configure_test.go | 156 ++++++++ cmd/elasticsearch/list-indices.go | 2 +- cmd/elasticsearch/list.go | 2 +- cmd/elasticsearch/list_indices_test.go | 64 ++++ cmd/elasticsearch/list_test.go | 150 ++++++++ cmd/elasticsearch/restore.go | 2 +- cmd/settings/check_and_finalize.go | 2 +- cmd/settings/list.go | 4 +- cmd/settings/restore.go | 4 +- cmd/stackgraph/list.go | 2 +- cmd/stackgraph/restore.go | 2 +- cmd/victoriametrics/list.go | 2 +- cmd/victoriametrics/restore.go | 2 +- internal/clients/s3/client_test.go | 24 +- internal/clients/s3/interface_test.go | 4 +- internal/foundation/config/config.go | 5 +- internal/foundation/config/config_test.go | 359 ++++++++++++++++++ .../testdata/validStorageConfigMapConfig.yaml | 181 +++++++++ .../testdata/validStorageConfigMapOnly.yaml | 171 +++++++++ .../testdata/validStorageSecretConfig.yaml | 21 + 26 files changed, 1136 insertions(+), 38 deletions(-) create mode 100644 internal/foundation/config/testdata/validStorageConfigMapConfig.yaml create mode 100644 internal/foundation/config/testdata/validStorageConfigMapOnly.yaml create mode 100644 internal/foundation/config/testdata/validStorageSecretConfig.yaml diff --git a/cmd/clickhouse/check_and_finalize.go b/cmd/clickhouse/check_and_finalize.go index 665a561..195fc55 100644 --- a/cmd/clickhouse/check_and_finalize.go +++ b/cmd/clickhouse/check_and_finalize.go @@ -33,7 +33,7 @@ func checkAndFinalizeCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { This command is useful when a restore was started without --wait flag or was interrupted. 
It will check the restore status and if complete, execute post-restore tasks and scale up resources.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.StorageIsRequired) }, } diff --git a/cmd/clickhouse/list.go b/cmd/clickhouse/list.go index 05b0e4c..729a999 100644 --- a/cmd/clickhouse/list.go +++ b/cmd/clickhouse/list.go @@ -18,7 +18,7 @@ func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "List available Clickhouse backups", Long: `List all Clickhouse backups from the ClickHouse Backup API.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runList, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runList, cmdutils.StorageIsRequired) }, } } diff --git a/cmd/clickhouse/restore.go b/cmd/clickhouse/restore.go index 9e6d820..1ed0368 100644 --- a/cmd/clickhouse/restore.go +++ b/cmd/clickhouse/restore.go @@ -27,7 +27,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "Restore ClickHouse from a backup archive", Long: `Restore ClickHouse data from a backup archive via ClickHouse Backup API. 
Waits for completion by default; use --background to run asynchronously.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runRestore, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, } diff --git a/cmd/cmdutils/common.go b/cmd/cmdutils/common.go index d0e5772..8e4131e 100644 --- a/cmd/cmdutils/common.go +++ b/cmd/cmdutils/common.go @@ -11,11 +11,6 @@ import ( const ( StorageIsRequired bool = true StorageIsNotRequired bool = false - - // MinioIsRequired is deprecated: use StorageIsRequired instead - MinioIsRequired = StorageIsRequired - // MinioIsNotRequired is deprecated: use StorageIsNotRequired instead - MinioIsNotRequired = StorageIsNotRequired ) func Run(globalFlags *config.CLIGlobalFlags, runFunc func(ctx *app.Context) error, storageRequired bool) { diff --git a/cmd/elasticsearch/check_and_finalize.go b/cmd/elasticsearch/check_and_finalize.go index 6cd028a..e80a3fe 100644 --- a/cmd/elasticsearch/check_and_finalize.go +++ b/cmd/elasticsearch/check_and_finalize.go @@ -25,7 +25,7 @@ func checkAndFinalizeCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Long: `Check the status of a restore operation and perform finalization (scale up deployments) if complete. 
If the restore is still running and --wait is specified, wait for completion before finalizing.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.StorageIsRequired) }, } diff --git a/cmd/elasticsearch/configure.go b/cmd/elasticsearch/configure.go index c56e3c2..d475839 100644 --- a/cmd/elasticsearch/configure.go +++ b/cmd/elasticsearch/configure.go @@ -16,7 +16,7 @@ func configureCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "Configure Elasticsearch snapshot repository and SLM policy", Long: `Configure Elasticsearch snapshot repository and Snapshot Lifecycle Management (SLM) policy for automated backups.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runConfigure, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runConfigure, cmdutils.StorageIsRequired) }, } } diff --git a/cmd/elasticsearch/configure_test.go b/cmd/elasticsearch/configure_test.go index b294c0e..1bad3cd 100644 --- a/cmd/elasticsearch/configure_test.go +++ b/cmd/elasticsearch/configure_test.go @@ -269,6 +269,162 @@ minio: } } +// TestConfigureCmd_StorageIntegration tests the integration with Kubernetes client using StorageConfig +// +//nolint:funlen +func TestConfigureCmd_StorageIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + tests := []struct { + name string + configData string + secretData string + expectError bool + errorContains string + }{ + { + name: "successful configuration with complete data (storage mode)", + configData: ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: 
backups + endpoint: storage:9000 + basepath: snapshots + accessKey: test-key + secretKey: test-secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +` + minimalStorageStackgraphConfig, + secretData: "", + expectError: false, + }, + { + name: "missing credentials in config with secret override (storage mode)", + configData: ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: storage:9000 + basepath: snapshots + accessKey: "" + secretKey: "" + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +` + minimalStorageStackgraphConfig, + secretData: ` +elasticsearch: + snapshotRepository: + accessKey: secret-key + secretKey: secret-value +storage: + accessKey: secret-storage-key + secretKey: secret-storage-value +`, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := fake.NewClientset() + + // Create ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": tt.configData, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret if provided + if tt.secretData != "" { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSecretName, + Namespace: testNamespace, + }, + Data: 
map[string][]byte{ + "config": []byte(tt.secretData), + }, + } + _, err := fakeClient.CoreV1().Secrets(testNamespace).Create( + context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + } + + // Test that config loading works + secretName := "" + if tt.secretData != "" { + secretName = testSecretName + } + cfg, err := config.LoadConfig(fakeClient, testNamespace, testConfigMapName, secretName) + + if tt.expectError { + assert.Error(t, err) + if tt.errorContains != "" { + assert.Contains(t, err.Error(), tt.errorContains) + } + } else { + require.NoError(t, err) + assert.NotNil(t, cfg) + // Verify storage mode + assert.False(t, cfg.IsLegacyMode()) + assert.True(t, cfg.StorageEnabled()) + assert.NotEmpty(t, cfg.Elasticsearch.SnapshotRepository.AccessKey) + assert.NotEmpty(t, cfg.Elasticsearch.SnapshotRepository.SecretKey) + } + }) + } +} + // TestMockESClientForConfigure demonstrates mock usage for configure // //nolint:funlen // Table-driven test diff --git a/cmd/elasticsearch/list-indices.go b/cmd/elasticsearch/list-indices.go index cc931dc..866eb93 100644 --- a/cmd/elasticsearch/list-indices.go +++ b/cmd/elasticsearch/list-indices.go @@ -16,7 +16,7 @@ func listIndicesCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Use: "list-indices", Short: "List Elasticsearch indices", Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runListIndices, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runListIndices, cmdutils.StorageIsRequired) }, } } diff --git a/cmd/elasticsearch/list.go b/cmd/elasticsearch/list.go index 0c548c6..7aa47dc 100644 --- a/cmd/elasticsearch/list.go +++ b/cmd/elasticsearch/list.go @@ -17,7 +17,7 @@ func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Use: "list", Short: "List available Elasticsearch snapshots", Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runListSnapshots, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runListSnapshots, 
cmdutils.StorageIsRequired) }, } } diff --git a/cmd/elasticsearch/list_indices_test.go b/cmd/elasticsearch/list_indices_test.go index 6b0bd21..800e35b 100644 --- a/cmd/elasticsearch/list_indices_test.go +++ b/cmd/elasticsearch/list_indices_test.go @@ -144,6 +144,70 @@ elasticsearch: assert.Equal(t, 9200, cfg.Elasticsearch.Service.Port) } +// TestListIndicesCmd_StorageIntegration tests the integration with Kubernetes client using StorageConfig +func TestListIndicesCmd_StorageIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create fake Kubernetes client + fakeClient := fake.NewClientset() + + // Create ConfigMap with valid config using StorageConfig instead of MinioConfig + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: storage:9000 + basepath: snapshots + accessKey: key + secretKey: secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +` + minimalStorageStackgraphConfig, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Test that config loading works + cfg, err := config.LoadConfig(fakeClient, testNamespace, testConfigMapName, "") + require.NoError(t, err) + assert.Equal(t, "elasticsearch-master", cfg.Elasticsearch.Service.Name) + assert.Equal(t, 9200, cfg.Elasticsearch.Service.Port) + 
// Verify storage mode + assert.False(t, cfg.IsLegacyMode()) + assert.True(t, cfg.StorageEnabled()) + assert.Equal(t, "storage", cfg.GetStorageService().Name) +} + // TestMockESClientForIndices demonstrates mock usage for indices func TestMockESClientForIndices(t *testing.T) { tests := []struct { diff --git a/cmd/elasticsearch/list_test.go b/cmd/elasticsearch/list_test.go index 81684d6..865e90b 100644 --- a/cmd/elasticsearch/list_test.go +++ b/cmd/elasticsearch/list_test.go @@ -107,6 +107,92 @@ clickhouse: scaleDownLabelSelector: "app=clickhouse" ` +// minimalStorageStackgraphConfig provides the required Storage and Stackgraph configuration for tests (new mode) +const minimalStorageStackgraphConfig = ` +storage: + globalBackupEnabled: true + service: + name: storage + port: 9000 + localPortForwardPort: 9000 + accessKey: storageadmin + secretKey: storageadmin +stackgraph: + bucket: stackgraph-bucket + multipartArchive: true + restore: + scaleDownLabelSelector: "app=stackgraph" + loggingConfigConfigMap: logging-config + zookeeperQuorum: "zookeeper:2181" + job: + image: backup:latest + waitImage: wait:latest + resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "1" + memory: "2Gi" + pvc: + size: "10Gi" +victoriaMetrics: + S3Locations: + - bucket: vm-backup + prefix: victoria-metrics-0 + - bucket: vm-backup + prefix: victoria-metrics-1 + restore: + haMode: "mirror" + persistentVolumeClaimPrefix: "database-victoria-metrics-" + scaleDownLabelSelector: "app=victoria-metrics" + job: + image: vm-backup:latest + waitImage: wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "500m" + memory: "1Gi" +settings: + bucket: sts-settings-backup + s3Prefix: "" + localBucket: sts-settings-local-backup + restore: + scaleDownLabelSelector: "app=settings" + loggingConfigConfigMap: logging-config + baseUrl: "http://server:7070" + receiverBaseUrl: "http://receiver:7077" + platformVersion: "5.2.0" + zookeeperQuorum: "zookeeper:2181" + job: + 
image: settings-backup:latest + waitImage: wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "500m" + memory: "1Gi" +clickhouse: + service: + name: "clickhouse" + port: 9000 + localPortForwardPort: 9000 + backupService: + name: "clickhouse" + port: 7171 + localPortForwardPort: 7171 + database: "default" + username: "default" + password: "password" + restore: + scaleDownLabelSelector: "app=clickhouse" +` + // mockESClient is a simple mock for testing commands type mockESClient struct { snapshots []elasticsearch.Snapshot @@ -218,6 +304,70 @@ elasticsearch: assert.Equal(t, "elasticsearch-master", cfg.Elasticsearch.Service.Name) } +// TestListCmd_StorageIntegration tests the full command flow with new StorageConfig +func TestListCmd_StorageIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create fake Kubernetes client + fakeClient := fake.NewClientset() + + // Create ConfigMap with valid config using StorageConfig instead of MinioConfig + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: storage:9000 + basepath: snapshots + accessKey: key + secretKey: secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +` + minimalStorageStackgraphConfig, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + 
require.NoError(t, err) + + // Test that config loading works + cfg, err := config.LoadConfig(fakeClient, testNamespace, testConfigMapName, "") + require.NoError(t, err) + assert.Equal(t, "backup-repo", cfg.Elasticsearch.Restore.Repository) + assert.Equal(t, "elasticsearch-master", cfg.Elasticsearch.Service.Name) + // Verify storage mode + assert.False(t, cfg.IsLegacyMode()) + assert.True(t, cfg.StorageEnabled()) + assert.Equal(t, "storage", cfg.GetStorageService().Name) +} + // TestListCmd_Unit demonstrates a unit-style test // This test focuses on the command structure and basic behavior func TestListCmd_Unit(t *testing.T) { diff --git a/cmd/elasticsearch/restore.go b/cmd/elasticsearch/restore.go index 6b1c88b..32d79f4 100644 --- a/cmd/elasticsearch/restore.go +++ b/cmd/elasticsearch/restore.go @@ -38,7 +38,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { Short: "Restore Elasticsearch from a snapshot", Long: `Restore Elasticsearch indices from a snapshot. Deletes existing STS indices before restore. 
Waits for completion by default; use --background to run asynchronously.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runRestore, cmdutils.MinioIsRequired) + cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }} cmd.Flags().StringVarP(&snapshotName, "snapshot", "s", "", "Snapshot name to restore (mutually exclusive with --latest)") diff --git a/cmd/settings/check_and_finalize.go b/cmd/settings/check_and_finalize.go index c923fb5..ced0ded 100644 --- a/cmd/settings/check_and_finalize.go +++ b/cmd/settings/check_and_finalize.go @@ -31,7 +31,7 @@ Examples: # Wait for job completion and cleanup sts-backup settings check-and-finalize --job settings-restore-20250128t143000 --wait -n my-namespace`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.MinioIsNotRequired) + cmdutils.Run(globalFlags, runCheckAndFinalize, cmdutils.StorageIsNotRequired) }, } diff --git a/cmd/settings/list.go b/cmd/settings/list.go index b789bc5..253eee7 100644 --- a/cmd/settings/list.go +++ b/cmd/settings/list.go @@ -37,9 +37,9 @@ var fromPVC bool func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "list", - Short: "List available Settings backups from S3/Minio", + Short: "List available Settings backups from S3", Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runList, cmdutils.MinioIsNotRequired) + cmdutils.Run(globalFlags, runList, cmdutils.StorageIsNotRequired) }, } diff --git a/cmd/settings/restore.go b/cmd/settings/restore.go index 4371210..f8ee4da 100644 --- a/cmd/settings/restore.go +++ b/cmd/settings/restore.go @@ -34,9 +34,9 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "restore", Short: "Restore Settings from a backup archive", - Long: `Restore Settings data from a backup archive stored in S3/Minio. 
Can use --latest or --archive to specify which backup to restore.`, + Long: `Restore Settings data from a backup archive stored in S3. Can use --latest or --archive to specify which backup to restore.`, Run: func(_ *cobra.Command, _ []string) { - cmdutils.Run(globalFlags, runRestore, cmdutils.MinioIsNotRequired) + cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsNotRequired) }, } diff --git a/cmd/stackgraph/list.go b/cmd/stackgraph/list.go index 9f309c6..6350223 100644 --- a/cmd/stackgraph/list.go +++ b/cmd/stackgraph/list.go @@ -20,7 +20,7 @@ import ( func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { return &cobra.Command{ Use: "list", - Short: "List available Stackgraph backups from S3/Minio", + Short: "List available Stackgraph backups from S3", Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runList, cmdutils.StorageIsRequired) }, diff --git a/cmd/stackgraph/restore.go b/cmd/stackgraph/restore.go index 157038d..c4b6f1a 100644 --- a/cmd/stackgraph/restore.go +++ b/cmd/stackgraph/restore.go @@ -41,7 +41,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "restore", Short: "Restore Stackgraph from a backup archive", - Long: `Restore Stackgraph data from a backup archive stored in S3/Minio. Can use --latest or --archive to specify which backup to restore.`, + Long: `Restore Stackgraph data from a backup archive stored in S3. 
Can use --latest or --archive to specify which backup to restore.`, Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, diff --git a/cmd/victoriametrics/list.go b/cmd/victoriametrics/list.go index adad31c..4042665 100644 --- a/cmd/victoriametrics/list.go +++ b/cmd/victoriametrics/list.go @@ -25,7 +25,7 @@ const ( func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { return &cobra.Command{ Use: "list", - Short: "List available VictoriaMetrics backups from S3/Minio", + Short: "List available VictoriaMetrics backups from S3", Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runList, cmdutils.StorageIsRequired) }, diff --git a/cmd/victoriametrics/restore.go b/cmd/victoriametrics/restore.go index 878cbf0..6c070ef 100644 --- a/cmd/victoriametrics/restore.go +++ b/cmd/victoriametrics/restore.go @@ -38,7 +38,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "restore", Short: "Restore VictoriaMetrics from a backup archive", - Long: `Restore VictoriaMetrics data from a backup archive stored in S3/Minio. Can use --latest or --archive to specify which backup to restore.`, + Long: `Restore VictoriaMetrics data from a backup archive stored in S3. 
Can use --latest or --archive to specify which backup to restore.`, Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, diff --git a/internal/clients/s3/client_test.go b/internal/clients/s3/client_test.go index 2535699..aee3e14 100644 --- a/internal/clients/s3/client_test.go +++ b/internal/clients/s3/client_test.go @@ -17,10 +17,10 @@ func TestNewClient(t *testing.T) { expectError bool }{ { - name: "valid minio configuration", - endpoint: "http://minio:9000", - accessKey: "minioadmin", - secretKey: "minioadmin", + name: "valid configuration", + endpoint: "http://s3proxy:9000", + accessKey: "access-admin", + secretKey: "secret-admin", expectError: false, }, { @@ -53,7 +53,7 @@ func TestNewClient(t *testing.T) { }, { name: "empty credentials", - endpoint: "http://minio:9000", + endpoint: "http://s3proxy:9000", accessKey: "", secretKey: "", expectError: false, // Client creation succeeds, but operations will fail @@ -77,7 +77,7 @@ func TestNewClient(t *testing.T) { // TestNewClient_ClientConfiguration tests that the client is configured correctly func TestNewClient_ClientConfiguration(t *testing.T) { - endpoint := "http://test-minio:9000" + endpoint := "http://test-s3proxy:9000" accessKey := "test-access" secretKey := "test-secret" @@ -152,7 +152,7 @@ func TestNewClient_CredentialFormats(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - client, err := NewClient("http://minio:9000", tt.accessKey, tt.secretKey) + client, err := NewClient("http://s3proxy:9000", tt.accessKey, tt.secretKey) // Client creation should succeed regardless of credential format // The credentials will be validated when actual S3 operations are performed @@ -170,7 +170,7 @@ func TestNewClient_EndpointFormats(t *testing.T) { }{ { name: "endpoint with http scheme", - endpoint: "http://minio.example.com:9000", + endpoint: "http://s3proxy.example.com:9000", }, { name: "endpoint with https scheme", @@ -178,15 
+178,15 @@ func TestNewClient_EndpointFormats(t *testing.T) { }, { name: "endpoint without port", - endpoint: "http://minio.local", + endpoint: "http://s3proxy.local", }, { name: "endpoint with non-standard port", - endpoint: "http://minio:8080", + endpoint: "http://s3proxy:8080", }, { name: "endpoint with path", - endpoint: "http://minio:9000/path/to/s3", + endpoint: "http://s3proxy:9000/path/to/s3", }, { name: "endpoint as IP address", @@ -218,7 +218,7 @@ func TestNewClient_ConcurrentCreation(t *testing.T) { for i := 0; i < numGoroutines; i++ { go func() { - _, err := NewClient("http://minio:9000", "access", "secret") + _, err := NewClient("http://s3proxy:9000", "access", "secret") if err != nil { errors <- err } diff --git a/internal/clients/s3/interface_test.go b/internal/clients/s3/interface_test.go index 053354d..da50397 100644 --- a/internal/clients/s3/interface_test.go +++ b/internal/clients/s3/interface_test.go @@ -16,7 +16,7 @@ func TestClientImplementsInterface(_ *testing.T) { // TestInterfaceContract verifies that Client correctly wraps AWS S3 client methods func TestInterfaceContract(t *testing.T) { // Create a client - client, err := NewClient("http://test-minio:9000", "test-access", "test-secret") + client, err := NewClient("http://test-s3proxy:9000", "test-access", "test-secret") assert.NoError(t, err) assert.NotNil(t, client) @@ -28,7 +28,7 @@ func TestInterfaceContract(t *testing.T) { // TestClientMethods verifies that all interface methods are implemented // Note: These tests don't call real S3 - they just verify the methods exist func TestClientMethods(t *testing.T) { - client, err := NewClient("http://test-minio:9000", "test-access", "test-secret") + client, err := NewClient("http://test-s3proxy:9000", "test-access", "test-secret") assert.NoError(t, err) require := assert.New(t) diff --git a/internal/foundation/config/config.go b/internal/foundation/config/config.go index ee582cf..d8c1cc8 100644 --- a/internal/foundation/config/config.go +++ 
b/internal/foundation/config/config.go @@ -33,12 +33,13 @@ func (c *Config) IsLegacyMode() bool { } // StorageEnabled returns true when S3-compatible storage is available, -// either through legacy Minio (with Enabled=true) or new Storage config. +// either through legacy Minio (with Enabled=true); when the new Storage config is used, +// storage is always enabled. func (c *Config) StorageEnabled() bool { if c.IsLegacyMode() { return c.Minio.Enabled } - return c.Storage.GlobalBackupEnabled + return true } // GetStorageService returns the service config for the S3-compatible storage, diff --git a/internal/foundation/config/config_test.go b/internal/foundation/config/config_test.go index 48a13a1..ba4fa4d 100644 --- a/internal/foundation/config/config_test.go +++ b/internal/foundation/config/config_test.go @@ -59,6 +59,49 @@ func TestLoadConfig_FromConfigMapOnly(t *testing.T) { assert.Equal(t, "sts-backup", config.Elasticsearch.SnapshotRepository.Name) assert.Equal(t, "configmap-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) assert.Equal(t, "configmap-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) + // Verify legacy mode + assert.True(t, config.IsLegacyMode()) + assert.True(t, config.StorageEnabled()) +} + +func TestLoadConfig_Storage_FromConfigMapOnly(t *testing.T) { + fakeClient := fake.NewClientset() + validConfigYAML := loadTestData(t, "validStorageConfigMapOnly.yaml") + + // Create ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "") + + // Assertions + require.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, 
"suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + assert.Equal(t, 9200, config.Elasticsearch.Service.Port) + assert.Equal(t, "sts-backup", config.Elasticsearch.SnapshotRepository.Name) + assert.Equal(t, "configmap-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "configmap-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) + // Verify new storage mode (not legacy) + assert.False(t, config.IsLegacyMode()) + assert.True(t, config.StorageEnabled()) + // Verify storage accessor methods return storage config values + assert.Equal(t, "suse-observability-storage", config.GetStorageService().Name) + assert.Equal(t, 9000, config.GetStorageService().Port) + assert.Equal(t, "storageadmin", config.GetStorageAccessKey()) + assert.Equal(t, "storageadmin", config.GetStorageSecretKey()) } func TestLoadConfig_CompleteConfiguration(t *testing.T) { @@ -136,6 +179,91 @@ func TestLoadConfig_CompleteConfiguration(t *testing.T) { assert.Equal(t, 30, config.Elasticsearch.SLM.RetentionMaxCount) } +func TestLoadConfig_Storage_CompleteConfiguration(t *testing.T) { + fakeClient := fake.NewClientset() + validConfigYAML := loadTestData(t, "validStorageConfigMapConfig.yaml") + secretOverrideYAML := loadTestData(t, "validStorageSecretConfig.yaml") + + // Create ConfigMap with non-sensitive configuration + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret with sensitive credentials + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-secret", + Namespace: "test-ns", + }, + Data: map[string][]byte{ + "config": []byte(secretOverrideYAML), + }, + } + _, err = fakeClient.CoreV1().Secrets("test-ns").Create( + 
context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config - production pattern: ConfigMap + Secret + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "backup-secret") + + // Comprehensive assertions + require.NoError(t, err) + assert.NotNil(t, config) + + // Verify new storage mode (not legacy) + assert.False(t, config.IsLegacyMode()) + assert.True(t, config.StorageEnabled()) + + // Service config + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + assert.Equal(t, 9200, config.Elasticsearch.Service.Port) + assert.Equal(t, 9200, config.Elasticsearch.Service.LocalPortForwardPort) + + // Restore config + assert.Equal(t, "observability.suse.com/scalable-during-es-restore=true", config.Elasticsearch.Restore.ScaleDownLabelSelector) + assert.Equal(t, "sts", config.Elasticsearch.Restore.IndexPrefix) + assert.Equal(t, ".ds-sts_k8s_logs", config.Elasticsearch.Restore.DatastreamIndexPrefix) + assert.Equal(t, "sts_k8s_logs", config.Elasticsearch.Restore.DatastreamName) + assert.Equal(t, "sts*,.ds-sts_k8s_logs*", config.Elasticsearch.Restore.IndicesPattern) + assert.Equal(t, "sts-backup", config.Elasticsearch.Restore.Repository) + + // Snapshot repository config + assert.Equal(t, "sts-backup", config.Elasticsearch.SnapshotRepository.Name) + assert.Equal(t, "sts-elasticsearch-backup", config.Elasticsearch.SnapshotRepository.Bucket) + assert.Equal(t, "suse-observability-storage:9000", config.Elasticsearch.SnapshotRepository.Endpoint) + assert.Equal(t, "", config.Elasticsearch.SnapshotRepository.BasePath) + // Credentials come from Secret + assert.Equal(t, "secret-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "secret-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) + + // Storage accessor methods should return secret-overridden values + assert.Equal(t, "suse-observability-storage", config.GetStorageService().Name) + 
assert.Equal(t, 9000, config.GetStorageService().Port) + assert.Equal(t, "secret-storage-access-key", config.GetStorageAccessKey()) + assert.Equal(t, "secret-storage-secret-key", config.GetStorageSecretKey()) + + // SLM config + assert.Equal(t, "auto-sts-backup", config.Elasticsearch.SLM.Name) + assert.Equal(t, "0 0 3 * * ?", config.Elasticsearch.SLM.Schedule) + assert.Equal(t, "", config.Elasticsearch.SLM.SnapshotTemplateName) + assert.Equal(t, "sts-backup", config.Elasticsearch.SLM.Repository) + assert.Equal(t, "sts*", config.Elasticsearch.SLM.Indices) + assert.Equal(t, "30d", config.Elasticsearch.SLM.RetentionExpireAfter) + assert.Equal(t, 5, config.Elasticsearch.SLM.RetentionMinCount) + assert.Equal(t, 30, config.Elasticsearch.SLM.RetentionMaxCount) +} + func TestLoadConfig_WithSecretOverride(t *testing.T) { fakeClient := fake.NewClientset() validConfigYAML := loadTestData(t, "validConfigMapOnly.yaml") @@ -183,6 +311,57 @@ func TestLoadConfig_WithSecretOverride(t *testing.T) { assert.Equal(t, "secret-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) } +func TestLoadConfig_Storage_WithSecretOverride(t *testing.T) { + fakeClient := fake.NewClientset() + validConfigYAML := loadTestData(t, "validStorageConfigMapOnly.yaml") + secretOverrideYAML := loadTestData(t, "validStorageSecretConfig.yaml") + + // Create ConfigMap with credentials + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret with different credentials + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-secret", + Namespace: "test-ns", + }, + Data: map[string][]byte{ + "config": []byte(secretOverrideYAML), + }, + } + _, err = fakeClient.CoreV1().Secrets("test-ns").Create( + 
context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "backup-secret") + + // Assertions - Secret should override ConfigMap credentials + require.NoError(t, err) + assert.NotNil(t, config) + assert.False(t, config.IsLegacyMode()) + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + // Verify Secret overrides ConfigMap: secret-access-key overrides configmap-access-key + assert.Equal(t, "secret-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "secret-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) + // Verify Secret overrides storage credentials + assert.Equal(t, "secret-storage-access-key", config.GetStorageAccessKey()) + assert.Equal(t, "secret-storage-secret-key", config.GetStorageSecretKey()) +} + func TestLoadConfig_ConfigMapNotFound(t *testing.T) { fakeClient := fake.NewClientset() @@ -305,6 +484,36 @@ func TestLoadConfig_SecretNotFoundWarning(t *testing.T) { assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) } +func TestLoadConfig_Storage_SecretNotFoundWarning(t *testing.T) { + fakeClient := fake.NewClientset() + validConfigYAML := loadTestData(t, "validStorageConfigMapOnly.yaml") + + // Create only ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config with non-existent secret (should succeed with warning) + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "nonexistent-secret") + + // Assertions - should succeed as secret is optional + require.NoError(t, err) + assert.NotNil(t, 
config) + assert.False(t, config.IsLegacyMode()) + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + assert.Equal(t, "suse-observability-storage", config.GetStorageService().Name) +} + func TestLoadConfig_EmptyConfigMapName(t *testing.T) { fakeClient := fake.NewClientset() @@ -474,6 +683,156 @@ func TestConfig_StructValidation(t *testing.T) { }, expectError: false, }, + { + name: "valid config with storage", + config: &Config{ + Elasticsearch: ElasticsearchConfig{ + Service: ServiceConfig{ + Name: "es-master", + Port: 9200, + LocalPortForwardPort: 9200, + }, + Restore: RestoreConfig{ + ScaleDownLabelSelector: "app=test", + IndexPrefix: "sts_", + DatastreamIndexPrefix: "sts_k8s", + DatastreamName: "sts_k8s", + IndicesPattern: "*", + Repository: "repo", + }, + SnapshotRepository: SnapshotRepositoryConfig{ + Name: "repo", + Bucket: "bucket", + Endpoint: "endpoint", + AccessKey: "key", + SecretKey: "secret", + }, + SLM: SLMConfig{ + Name: "slm", + Schedule: "0 0 * * *", + SnapshotTemplateName: "snap", + Repository: "repo", + Indices: "*", + RetentionExpireAfter: "30d", + RetentionMinCount: 1, + RetentionMaxCount: 10, + }, + }, + Storage: StorageConfig{ + GlobalBackupEnabled: true, + Service: ServiceConfig{ + Name: "storage", + Port: 9000, + LocalPortForwardPort: 9000, + }, + AccessKey: "storageadmin", + SecretKey: "storageadmin", + }, + Stackgraph: StackgraphConfig{ + Bucket: "stackgraph-bucket", + S3Prefix: "", + MultipartArchive: true, + Restore: StackgraphRestoreConfig{ + ScaleDownLabelSelector: "app=stackgraph", + LoggingConfigConfigMapName: "logging-config", + ZookeeperQuorum: "zookeeper:2181", + Job: JobConfig{ + Image: "backup:latest", + WaitImage: "wait:latest", + Resources: ResourceRequirements{ + Limits: ResourceList{ + CPU: "2", + Memory: "4Gi", + }, + Requests: ResourceList{ + CPU: "1", + Memory: "2Gi", + }, + }, + }, + PVC: PVCConfig{ + Size: "10Gi", + }, + }, + }, + VictoriaMetrics: 
VictoriaMetricsConfig{ + S3Locations: []S3Location{ + { + Bucket: "vm-backup", + Prefix: "victoria-metrics-0", + }, + { + Bucket: "vm-backup", + Prefix: "victoria-metrics-1", + }, + }, + Restore: VictoriaMetricsRestoreConfig{ + HaMode: "mirror", + PersistentVolumeClaimPrefix: "database-victoria-metrics-", + ScaleDownLabelSelector: "app=victoria-metrics", + Job: JobConfig{ + Image: "vm-backup:latest", + WaitImage: "wait:latest", + Resources: ResourceRequirements{ + Limits: ResourceList{ + CPU: "1", + Memory: "2Gi", + }, + Requests: ResourceList{ + CPU: "500m", + Memory: "1Gi", + }, + }, + }, + }, + }, + Settings: SettingsConfig{ + Bucket: "settings-backup", + S3Prefix: "", + Restore: SettingsRestoreConfig{ + ScaleDownLabelSelector: "app=settings", + LoggingConfigConfigMapName: "logging-config", + BaseURL: "http://server:7070", + ReceiverBaseURL: "http://receiver:7077", + PlatformVersion: "5.2.0", + ZookeeperQuorum: "zookeeper:2181", + Job: JobConfig{ + Image: "settings-backup:latest", + WaitImage: "wait:latest", + Resources: ResourceRequirements{ + Limits: ResourceList{ + CPU: "1", + Memory: "2Gi", + }, + Requests: ResourceList{ + CPU: "500m", + Memory: "1Gi", + }, + }, + }, + }, + }, + Clickhouse: ClickhouseConfig{ + Service: ServiceConfig{ + Name: "clickhouse", + Port: 9000, + LocalPortForwardPort: 9000, + }, + BackupService: ServiceConfig{ + Name: "clickhouse", + Port: 7171, + LocalPortForwardPort: 7171, + }, + Database: "default", + Username: "default", + Password: "password", + Restore: ClickhouseRestoreConfig{ + ScaleDownLabelSelector: "app=clickhouse", + }, + }, + }, + expectError: false, + }, { name: "invalid port number", config: &Config{ diff --git a/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml b/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml new file mode 100644 index 0000000..5b928f8 --- /dev/null +++ b/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml @@ -0,0 +1,181 @@ +# Valid ConfigMap 
Configuration for SUSE Observability Backup CLI (Storage mode) +# This file contains the main configuration using the new StorageConfig (non-legacy mode). +# It is typically stored in a Kubernetes ConfigMap. + +elasticsearch: + # Snapshot repository configuration for S3-compatible storage + snapshotRepository: + # Name of the Elasticsearch snapshot repository + name: sts-backup + # S3 bucket name where snapshots will be stored + bucket: sts-elasticsearch-backup + # S3 endpoint (hostname:port) + endpoint: suse-observability-storage:9000 + # Base path within the bucket for snapshots (empty string for root) + basepath: "" + + # Snapshot Lifecycle Management (SLM) policy configuration + # SLM automates snapshot creation on a schedule + slm: + # Name of the SLM policy + name: auto-sts-backup + # Cron schedule for automatic snapshots (Quartz format: "second minute hour day month weekday") + # Example: "0 0 3 * * ?" = daily at 3:00 AM + schedule: "0 0 3 * * ?" + # Template for snapshot names (supports Elasticsearch date math) + # Example: "" creates snapshots like "sts-backup-20240115-0300" + snapshotTemplateName: "" + # Repository to store snapshots (must match snapshotRepository.name) + repository: sts-backup + # Indices pattern to include in snapshots (glob pattern) + indices: "sts*" + # Retention policy: delete snapshots older than this duration (e.g., 30d, 7d, 90d) + retentionExpireAfter: 30d + # Retention policy: minimum number of snapshots to keep (even if expired) + retentionMinCount: 5 + # Retention policy: maximum number of snapshots to keep + retentionMaxCount: 30 + + # Elasticsearch service connection details + service: + # Name of the Elasticsearch service in Kubernetes + name: suse-observability-elasticsearch-master-headless + # Port number for Elasticsearch HTTP API + port: 9200 + # Local port to use for port-forwarding (can be same as port) + localPortForwardPort: 9200 + + # Restore operation configuration + restore: + # Snapshot repository to restore from 
(must match snapshotRepository.name) + repository: sts-backup + # Kubernetes label selector for deployments to scale down during restore + # Example: "observability.suse.com/scalable-during-es-restore=true" + scaleDownLabelSelector: "observability.suse.com/scalable-during-es-restore=true" + # Prefix for regular indices to filter during restore operations + indexPrefix: sts + # Prefix for datastream indices (datastreams use pattern: .ds-{name}-{generation}) + datastreamIndexPrefix: .ds-sts_k8s_logs + # Name of the datastream (used for rollover operations) + datastreamName: sts_k8s_logs + # Pattern for indices to restore from snapshot (comma-separated glob patterns) + indicesPattern: sts*,.ds-sts_k8s_logs* + +# Storage configuration for S3-compatible storage (new mode, replaces Minio) +storage: + globalBackupEnabled: true + # Storage service connection details + service: + name: suse-observability-storage + port: 9000 + localPortForwardPort: 9000 + # Access credentials (typically from Kubernetes secret) + accessKey: storageadmin + secretKey: storageadmin + +# Stackgraph backup configuration +stackgraph: + # S3 bucket for stackgraph backups + bucket: sts-stackgraph-backup + # S3 prefix path for backups + s3Prefix: "" + # Archive split to multiple parts + multipartArchive: true + # Restore configuration + restore: + # Label selector for deployments to scale down during restore + scaleDownLabelSelector: "observability.suse.com/scalable-during-stackgraph-restore=true" + # ConfigMap containing logging configuration + loggingConfigConfigMap: suse-observability-logging + # Zookeeper quorum connection string + zookeeperQuorum: "suse-observability-zookeeper:2181" + # Job configuration + job: + labels: + app: stackgraph-restore + image: quay.io/stackstate/stackstate-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "1" + memory: "2Gi" + # PVC configuration for restore jobs + pvc: + size: "10Gi" + 
accessModes: + - ReadWriteOnce + +# VictoriaMetrics backup configuration +victoriaMetrics: + # S3 locations for VictoriaMetrics backups (one per instance) + S3Locations: + - bucket: sts-victoria-metrics-backup + prefix: victoria-metrics-0 + - bucket: sts-victoria-metrics-backup + prefix: victoria-metrics-1 + # Restore configuration + restore: + # HA mode for VictoriaMetrics (mirror = two independent instances) + haMode: "mirror" + # PVC prefix for VictoriaMetrics StatefulSet PVCs + persistentVolumeClaimPrefix: "database-victoria-metrics-" + # Label selector for deployments to scale down during restore + scaleDownLabelSelector: "observability.suse.com/scalable-during-vm-restore=true" + # Job configuration + job: + labels: + app: victoria-metrics-restore + image: quay.io/stackstate/victoria-metrics-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "500m" + memory: "1Gi" + +# Settings backup configuration +# In storage mode, localBucket is required instead of settings.restore.pvc +settings: + bucket: sts-settings-backup + s3Prefix: "" + localBucket: sts-settings-local-backup + restore: + scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true" + loggingConfigConfigMap: suse-observability-logging + baseUrl: "http://suse-observability-server:7070" + receiverBaseUrl: "http://suse-observability-receiver:7077" + platformVersion: "5.2.0" + zookeeperQuorum: "suse-observability-zookeeper:2181" + job: + labels: + app: settings-restore + image: quay.io/stackstate/settings-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "500m" + memory: "1Gi" + +# ClickHouse backup configuration +clickhouse: + service: + name: "suse-observability-clickhouse-shard0-0" + port: 9000 + localPortForwardPort: 9000 + backupService: + name: "suse-observability-clickhouse-shard0-0" + port: 7171 + localPortForwardPort: 
7171 + database: "default" + username: "default" + password: "password" + restore: + scaleDownLabelSelector: "observability.suse.com/scalable-during-clickhouse-restore=true" diff --git a/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml b/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml new file mode 100644 index 0000000..9498e59 --- /dev/null +++ b/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml @@ -0,0 +1,171 @@ +# Valid ConfigMap-Only Configuration for SUSE Observability Backup CLI (Storage mode) +# This file contains a complete configuration using the new StorageConfig (non-legacy mode). +# Use this for tests that don't involve Secret overrides. +# In production, credentials should always be stored in Secrets, not ConfigMaps. + +elasticsearch: + # Snapshot repository configuration for S3-compatible storage + snapshotRepository: + # Name of the Elasticsearch snapshot repository + name: sts-backup + # S3 bucket name where snapshots will be stored + bucket: sts-elasticsearch-backup + # S3 endpoint (hostname:port) + endpoint: suse-observability-storage:9000 + # Base path within the bucket for snapshots (empty string for root) + basepath: "" + # Access key for S3 - included here for testing only + # In production, use Secret overrides instead + accessKey: configmap-access-key + # Secret key for S3 - included here for testing only + # In production, use Secret overrides instead + secretKey: configmap-secret-key + + # Snapshot Lifecycle Management (SLM) policy configuration + # SLM automates snapshot creation on a schedule + slm: + # Name of the SLM policy + name: auto-sts-backup + # Cron schedule for automatic snapshots (Quartz format: "second minute hour day month weekday") + # Example: "0 0 3 * * ?" = daily at 3:00 AM + schedule: "0 0 3 * * ?" 
+ # Template for snapshot names (supports Elasticsearch date math) + # Example: "" creates snapshots like "sts-backup-20240115-0300" + snapshotTemplateName: "" + # Repository to store snapshots (must match snapshotRepository.name) + repository: sts-backup + # Indices pattern to include in snapshots (glob pattern) + indices: "sts*" + # Retention policy: delete snapshots older than this duration (e.g., 30d, 7d, 90d) + retentionExpireAfter: 30d + # Retention policy: minimum number of snapshots to keep (even if expired) + retentionMinCount: 5 + # Retention policy: maximum number of snapshots to keep + retentionMaxCount: 30 + + # Elasticsearch service connection details + service: + # Name of the Elasticsearch service in Kubernetes + name: suse-observability-elasticsearch-master-headless + # Port number for Elasticsearch HTTP API + port: 9200 + # Local port to use for port-forwarding (can be same as port) + localPortForwardPort: 9200 + + # Restore operation configuration + restore: + # Snapshot repository to restore from (must match snapshotRepository.name) + repository: sts-backup + # Kubernetes label selector for deployments to scale down during restore + # Example: "observability.suse.com/scalable-during-es-restore=true" + scaleDownLabelSelector: "observability.suse.com/scalable-during-es-restore=true" + # Prefix for regular indices to filter during restore operations + indexPrefix: sts + # Prefix for datastream indices (datastreams use pattern: .ds-{name}-{generation}) + datastreamIndexPrefix: .ds-sts_k8s_logs + # Name of the datastream (used for rollover operations) + datastreamName: sts_k8s_logs + # Pattern for indices to restore from snapshot (comma-separated glob patterns) + indicesPattern: sts*,.ds-sts_k8s_logs* + +# Storage configuration for S3-compatible storage (new mode, replaces Minio) +storage: + globalBackupEnabled: true + service: + name: suse-observability-storage + port: 9000 + localPortForwardPort: 9000 + accessKey: storageadmin + secretKey: 
storageadmin + +# Stackgraph backup configuration +stackgraph: + bucket: sts-stackgraph-backup + s3Prefix: "" + multipartArchive: true + restore: + scaleDownLabelSelector: "observability.suse.com/scalable-during-stackgraph-restore=true" + loggingConfigConfigMap: suse-observability-logging + zookeeperQuorum: "suse-observability-zookeeper:2181" + job: + labels: + app: stackgraph-restore + image: quay.io/stackstate/stackstate-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "1" + memory: "2Gi" + pvc: + size: "10Gi" + accessModes: + - ReadWriteOnce + +# VictoriaMetrics backup configuration +victoriaMetrics: + S3Locations: + - bucket: sts-victoria-metrics-backup + prefix: victoria-metrics-0 + - bucket: sts-victoria-metrics-backup + prefix: victoria-metrics-1 + restore: + haMode: "mirror" + persistentVolumeClaimPrefix: "database-victoria-metrics-" + scaleDownLabelSelector: "observability.suse.com/scalable-during-vm-restore=true" + job: + labels: + app: victoria-metrics-restore + image: quay.io/stackstate/victoria-metrics-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "500m" + memory: "1Gi" + +# Settings backup configuration +# In storage mode, localBucket is required instead of settings.restore.pvc +settings: + bucket: sts-settings-backup + s3Prefix: "" + localBucket: sts-settings-local-backup + restore: + scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true" + loggingConfigConfigMap: suse-observability-logging + baseUrl: "http://suse-observability-server:7070" + receiverBaseUrl: "http://suse-observability-receiver:7077" + platformVersion: "5.2.0" + zookeeperQuorum: "suse-observability-zookeeper:2181" + job: + labels: + app: settings-restore + image: quay.io/stackstate/settings-backup:latest + waitImage: quay.io/stackstate/wait:latest + resources: + limits: + cpu: "1" + memory: "2Gi" 
+ requests: + cpu: "500m" + memory: "1Gi" + +# ClickHouse backup configuration +clickhouse: + service: + name: "suse-observability-clickhouse-shard0-0" + port: 9000 + localPortForwardPort: 9000 + backupService: + name: "suse-observability-clickhouse-shard0-0" + port: 7171 + localPortForwardPort: 7171 + database: "default" + username: "default" + password: "password" + restore: + scaleDownLabelSelector: "observability.suse.com/scalable-during-clickhouse-restore=true" diff --git a/internal/foundation/config/testdata/validStorageSecretConfig.yaml b/internal/foundation/config/testdata/validStorageSecretConfig.yaml new file mode 100644 index 0000000..8ae3347 --- /dev/null +++ b/internal/foundation/config/testdata/validStorageSecretConfig.yaml @@ -0,0 +1,21 @@ +# Valid Secret Configuration for SUSE Observability Backup CLI (Storage mode) +# This file contains sensitive credentials for S3-compatible storage access. +# It is typically stored in a Kubernetes Secret and overrides values from the ConfigMap. +# +# Only the fields specified here will override the ConfigMap values. +# All other configuration remains unchanged. + +elasticsearch: + snapshotRepository: + # S3 access key (overrides ConfigMap value if present) + # This credential is used to authenticate with the S3-compatible storage + accessKey: secret-access-key + # S3 secret key (overrides ConfigMap value if present) + # Keep this value secure - it should never be committed to ConfigMaps + secretKey: secret-secret-key + +storage: + # Storage access key (overrides ConfigMap value if present) + accessKey: secret-storage-access-key + # Storage secret key (overrides ConfigMap value if present) + secretKey: secret-storage-secret-key