diff --git a/Dockerfile b/Dockerfile
index 9bac93dbe..159250eb2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,7 +3,7 @@ ARG BUILDINFO
ARG BASEIMAGE=scratch
# Build the manager binary
-FROM golang:1.25.7 AS builder
+FROM golang:1.25.8 AS builder
ARG TARGETOS
ARG TARGETARCH
diff --git a/api/operator/v1beta1/vmcluster_types.go b/api/operator/v1beta1/vmcluster_types.go
index 4c1a43ed8..41ba43212 100644
--- a/api/operator/v1beta1/vmcluster_types.go
+++ b/api/operator/v1beta1/vmcluster_types.go
@@ -509,9 +509,9 @@ type VMBackup struct {
// SnapshotCreateURL overwrites url for snapshot create
// +optional
SnapshotCreateURL string `json:"snapshotCreateURL,omitempty"`
- // SnapShotDeleteURL overwrites url for snapshot delete
+ // SnapshotDeleteURL overwrites url for snapshot delete
// +optional
- SnapShotDeleteURL string `json:"snapshotDeleteURL,omitempty"`
+ SnapshotDeleteURL string `json:"snapshotDeleteURL,omitempty"`
// Defines number of concurrent workers. Higher concurrency may reduce backup duration (default 10)
// +optional
Concurrency *int32 `json:"concurrency,omitempty"`
diff --git a/cmd/config-reloader/main.go b/cmd/config-reloader/main.go
index cd656a617..b715c0d48 100644
--- a/cmd/config-reloader/main.go
+++ b/cmd/config-reloader/main.go
@@ -68,6 +68,8 @@ var (
"Optional TLS server name to use for connections to -reload-url.")
tlsInsecureSkipVerify = flag.Bool("reload.tlsInsecureSkipVerify", true,
"Whether to skip tls verification when connecting to -reload-url")
+ logFormat = flag.String("log-format", "",
+ "log format alias to loggerFormat")
)
var (
@@ -81,6 +83,9 @@ var (
func main() {
envflag.Parse()
+ if *logFormat != "" {
+ _ = flag.Set("loggerFormat", *logFormat)
+ }
buildinfo.Init()
logger.Init()
ctx, cancel := context.WithCancel(context.Background())
diff --git a/cmd/config-reloader/main_test.go b/cmd/config-reloader/main_test.go
new file mode 100644
index 000000000..8c0d8e0f0
--- /dev/null
+++ b/cmd/config-reloader/main_test.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "flag"
+ "testing"
+)
+
+func TestLogFormatAlias(t *testing.T) {
+ f := func(logFormatVal, loggerFormatVal, expected string) {
+ t.Helper()
+ _ = flag.Set("log-format", logFormatVal)
+ _ = flag.Set("loggerFormat", loggerFormatVal)
+
+ if *logFormat != "" {
+ _ = flag.Set("loggerFormat", *logFormat)
+ }
+
+ loggerFormatFlag := flag.Lookup("loggerFormat")
+ if loggerFormatFlag == nil {
+ t.Fatalf("expected loggerFormat flag to be registered")
+ }
+
+ if loggerFormatFlag.Value.String() != expected {
+ t.Fatalf("expected loggerFormat to be %q, got %q", expected, loggerFormatFlag.Value.String())
+ }
+ }
+
+ // only log-format is set
+ f("json", "default", "json")
+
+ // log-format is empty
+ f("", "json", "json")
+}
diff --git a/config/crd/overlay/crd.yaml b/config/crd/overlay/crd.yaml
index b5859037a..29aee3e31 100644
--- a/config/crd/overlay/crd.yaml
+++ b/config/crd/overlay/crd.yaml
@@ -32508,7 +32508,7 @@ spec:
create
type: string
snapshotDeleteURL:
- description: SnapShotDeleteURL overwrites url for snapshot
+ description: SnapshotDeleteURL overwrites url for snapshot
delete
type: string
volumeMounts:
@@ -71837,7 +71837,7 @@ spec:
description: SnapshotCreateURL overwrites url for snapshot create
type: string
snapshotDeleteURL:
- description: SnapShotDeleteURL overwrites url for snapshot delete
+ description: SnapshotDeleteURL overwrites url for snapshot delete
type: string
volumeMounts:
description: |-
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d911ab559..b7925f8a1 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -14,6 +14,15 @@ aliases:
## tip
* FEATURE: [vmoperator](https://docs.victoriametrics.com/operator/): prettify reconcile diff in logs, now diff objects show only changed JSON fields.
+* FEATURE: [vmoperator](https://docs.victoriametrics.com/operator/): introduce VM_LOOPBACK env variable, which allows overriding the default loopback address, which is either `localhost` if `VM_ENABLETCP6=true` or `127.0.0.1` in other cases. Note: this change may cause component rollouts when 127.0.0.1 is used as the loopback address. Make sure to set this env var before upgrading.
+
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): recreate STS if immutable fields changed.
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): pod recreation isn't lost after triggering StatefulSet delete.
+* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): updated VMAuth config consolidating all VMSelects into a single read and all VMAgents into a single write backend.
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): fix finalizer cleanup for VLAgent, VLogs, VMAgent, and VMSingle to target the correct resources.
+* BUGFIX: [config-reloader](https://github.com/VictoriaMetrics/operator/tree/master/cmd/config-reloader): `log-format` option supported as an alias for `loggerFormat`; when set, it takes precedence. See [#1954](https://github.com/VictoriaMetrics/operator/issues/1954).
+
+* SECURITY: upgrade Go builder from Go1.25.7 to Go1.25.8. See [the list of issues addressed in Go1.25.8](https://github.com/golang/go/issues?q=milestone%3AGo1.25.8+label%3ACherryPickApproved).
## [v0.68.2](https://github.com/VictoriaMetrics/operator/releases/tag/v0.68.2)
**Release date:** 04 March 2026
@@ -22,7 +31,6 @@ aliases:
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): perform statefulset pods deletion instead of eviction when maxUnavailable set to 100%, which is important for [minimum downtime strategy](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#minimum-downtime-strategy). See [#1706](https://github.com/VictoriaMetrics/operator/issues/1706).
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): VMPodScrape for VLAgent and VMAgent now uses the correct port; previously it used the wrong port and could cause scrape failures. See [#1887](https://github.com/VictoriaMetrics/operator/issues/1887).
-* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): updated VMAuth config consolidating all VMSelects into a single read and all VMClusters into a single write backend
* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): fix PVC being owned by StatefulSet and top-level object simultaenously. See [#1845](https://github.com/VictoriaMetrics/operator/issues/1845).
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): remove unneeded finalizer from core K8s resources. See [#835](https://github.com/VictoriaMetrics/operator/issues/835).
* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): remove finalizers from VMServiceScrape and VMPodScrape objects, and keep finalizers on VMAgent, VMCluster, and VMAuth when DeletionTimestamp is not empty.
diff --git a/docs/api.md b/docs/api.md
index 22bc407ac..93eab4967 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -3832,7 +3832,7 @@ Appears in: [VMSingleSpec](#vmsinglespec), [VMStorage](#vmstorage)
| resources#
_[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#resourcerequirements-v1-core)_ | _(Optional)_
Resources container resource request and limits, https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
if not defined default resources from operator config will be used |
| restore#
_[VMRestore](#vmrestore)_ | _(Optional)_
Restore Allows to enable restore options for pod
Read [more](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/#restore-commands) |
| snapshotCreateURL#
_string_ | _(Optional)_
SnapshotCreateURL overwrites url for snapshot create |
-| snapshotDeleteURL#
_string_ | _(Optional)_
SnapShotDeleteURL overwrites url for snapshot delete |
+| snapshotDeleteURL#
_string_ | _(Optional)_
SnapshotDeleteURL overwrites url for snapshot delete |
| volumeMounts#
_[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.35/#volumemount-v1-core) array_ | _(Optional)_
VolumeMounts allows configuration of additional VolumeMounts on the output Deployment definition.
VolumeMounts specified will be appended to other VolumeMounts in the vmbackupmanager container,
that are generated as a result of StorageSpec objects. |
#### VMCluster
diff --git a/docs/config-reloader-flags.md b/docs/config-reloader-flags.md
index f0ae40f8f..299d47a34 100644
--- a/docs/config-reloader-flags.md
+++ b/docs/config-reloader-flags.md
@@ -69,6 +69,8 @@ Usage of bin/config-reloader:
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
+ -log-format string
+ log format alias to loggerFormat
-loggerDisableTimestamps
Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int
diff --git a/docs/env.md b/docs/env.md
index 5a21dfae9..b54ea6780 100644
--- a/docs/env.md
+++ b/docs/env.md
@@ -12,6 +12,7 @@
| VM_CUSTOMCONFIGRELOADERIMAGE: `-` #
Deprecated: use VM_CONFIG_RELOADER_IMAGE instead |
| VM_PSPAUTOCREATEENABLED: `false` # |
| VM_ENABLETCP6: `false` # |
+| VM_LOOPBACK: `-` #
Overrides default loopback interface that will be used for all VM components |
| VM_CONFIG_RELOADER_IMAGE: `victoriametrics/operator:config-reloader-${VM_OPERATOR_VERSION}` #
default image for all config-reloader containers |
| VM_CONFIG_RELOADER_LIMIT_MEMORY: `unlimited` #
defines global resource.limits.memory for all config-reloader containers |
| VM_CONFIG_RELOADER_LIMIT_CPU: `unlimited` #
defines global resource.limits.cpu for all config-reloader containers |
diff --git a/internal/config/config.go b/internal/config/config.go
index dbf500ce8..2de9d0bf4 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -115,6 +115,8 @@ type BaseOperatorConf struct {
CustomConfigReloaderImage string `env:"VM_CUSTOMCONFIGRELOADERIMAGE"`
PSPAutoCreateEnabled bool `default:"false" env:"VM_PSPAUTOCREATEENABLED"`
EnableTCP6 bool `default:"false" env:"VM_ENABLETCP6"`
+ // Overrides default loopback interface that will be used for all VM components
+ Loopback string `env:"VM_LOOPBACK"`
// defines global config reloader parameters
ConfigReloader struct {
@@ -715,6 +717,9 @@ func MustGetBaseConfig() *BaseOperatorConf {
// GetLocalhost returns localhost value depending on global configuration
func GetLocalhost() string {
cfg := MustGetBaseConfig()
+ if len(cfg.Loopback) > 0 {
+ return cfg.Loopback
+ }
if cfg.EnableTCP6 {
return "localhost"
}
diff --git a/internal/controller/operator/factory/build/backup.go b/internal/controller/operator/factory/build/backup.go
index e2a56114d..872b51f88 100644
--- a/internal/controller/operator/factory/build/backup.go
+++ b/internal/controller/operator/factory/build/backup.go
@@ -34,7 +34,7 @@ func VMBackupManager(
return nil, nil
}
snapshotCreateURL := cr.SnapshotCreateURL
- snapshotDeleteURL := cr.SnapShotDeleteURL
+ snapshotDeleteURL := cr.SnapshotDeleteURL
if snapshotCreateURL == "" {
// http://localhost:port/snapshot/create
snapshotCreateURL = cr.SnapshotCreatePathWithFlags(config.GetLocalhost(), port, extraArgs)
diff --git a/internal/controller/operator/factory/reconcile/diff.go b/internal/controller/operator/factory/reconcile/diff.go
index a506e5b56..b493acf1a 100644
--- a/internal/controller/operator/factory/reconcile/diff.go
+++ b/internal/controller/operator/factory/reconcile/diff.go
@@ -84,7 +84,7 @@ func (r *fieldDiffRecorder) Report(rs cmp.Result) {
key += fmt.Sprintf("[%d\u2192%d]", ix, iy)
}
case cmp.MapIndex:
- key += "['" + v.String() + "']"
+ key += strings.ReplaceAll(v.String(), `"`, `'`)
}
}
if !moved {
diff --git a/internal/controller/operator/factory/reconcile/diff_test.go b/internal/controller/operator/factory/reconcile/diff_test.go
index 56aad69f6..66a638604 100644
--- a/internal/controller/operator/factory/reconcile/diff_test.go
+++ b/internal/controller/operator/factory/reconcile/diff_test.go
@@ -8,108 +8,217 @@ import (
)
func TestDiffDeepOk(t *testing.T) {
- f := func(desiredObj, currentObj any, expected string) {
- t.Helper()
- got := diffDeep(desiredObj, currentObj, "spec")
- data, err := json.Marshal(got)
- assert.NoError(t, err)
- assert.Equal(t, string(data), expected)
- }
- var a1Slice []string
- a2SliceNonNil := make([]string, 0)
- f(a1Slice, a2SliceNonNil, `{}`)
- a1MapNonNil := make(map[int]int, 0)
- var a2MapNil map[int]int
- f(a1MapNonNil, a2MapNil, `{}`)
- f(5, 5, `{}`)
- f(5, 6, `{"spec":{"--":6,"++":5}}`)
- f("new", "line", `{"spec":{"--":"line","++":"new"}}`)
-
type cmpStruct struct {
- Field1 []string `json:"field1,omitempty"`
- Field2 map[int]int `json:"field2,omitempty"`
+ Field1 []string `json:"field1,omitempty"`
+ Field2 map[string]int `json:"field2,omitempty"`
Field3 int32
+ Field4 map[int]int `json:"field4,omitempty"`
+ Field5 string
}
- var a1StructEmpty cmpStruct
- a2StructFilled := cmpStruct{
- Field1: make([]string, 0),
- Field3: 5,
+ type opts struct {
+ desired, current *cmpStruct
+ expected, expectedDerivative string
}
- f(a1StructEmpty, a2StructFilled, `{"spec.Field3":{"--":5,"++":0}}`)
-
- var a2StructEmptyPtr *cmpStruct
- a1StructFilledPtr := &cmpStruct{
- Field2: make(map[int]int),
- Field3: 10,
- }
- f(a1StructFilledPtr, a2StructEmptyPtr, `{"spec":{"++":{"Field3":10}}}`)
-}
-
-func TestDiffDeepDerivativeOk(t *testing.T) {
- f := func(desiredObj, currentObj any, expected string) {
+ f := func(o opts) {
t.Helper()
- got := diffDeepDerivative(desiredObj, currentObj, "spec")
+ got := diffDeepInternal(o.desired, o.current, false, "spec")
data, err := json.Marshal(got)
assert.NoError(t, err)
- assert.Equal(t, string(data), expected)
- }
- var oldS []string
- newS := make([]string, 0)
- f(oldS, newS, `{}`)
- var newM map[int]int
- oldM := make(map[int]int, 0)
- f(oldM, newM, `{}`)
- f(5, 5, `{}`)
- f(5, 6, `{"spec":{"--":6,"++":5}}`)
- f("new", "line", `{"spec":{"--":"line","++":"new"}}`)
+ assert.Equal(t, string(data), o.expected)
- type cmpStruct struct {
- Field1 []string `json:"field1,omitempty"`
- Field2 map[int]int `json:"field2,omitempty"`
- Field3 int32 `json:"field3"`
- }
- var a2Empty cmpStruct
- a1Filled := cmpStruct{
- Field1: []string{"v1"},
+ // derivative
+ got = diffDeepInternal(o.desired, o.current, true, "spec")
+ data, err = json.Marshal(got)
+ assert.NoError(t, err)
+ assert.Equal(t, string(data), o.expectedDerivative)
}
- f(a1Filled, a2Empty, `{"spec.field1":{"++":["v1"]}}`)
- a1EmptyPtr := &cmpStruct{}
- a2FilledPtr := &cmpStruct{
- Field1: []string{"1", "2"},
- Field2: make(map[int]int),
- }
- f(a2FilledPtr, a1EmptyPtr, `{"spec.field1":{"++":["1","2"]}}`)
+ // nil to empty slice
+ f(opts{
+ desired: &cmpStruct{
+ Field1: make([]string, 0),
+ },
+ current: &cmpStruct{},
+ expected: `{}`,
+ expectedDerivative: `{}`,
+ })
- a3PartialPtr := &cmpStruct{
- Field1: []string{"1"},
- }
- a3FilledPtr := &cmpStruct{
- Field1: []string{"1", "2"},
- }
- f(a3PartialPtr, a3FilledPtr, `{"spec.field1[1]":{"--":"2"}}`)
+ // nil to empty map
+ f(opts{
+ desired: &cmpStruct{
+ Field4: make(map[int]int),
+ },
+ current: &cmpStruct{},
+ expected: `{}`,
+ expectedDerivative: `{}`,
+ })
- a4PartialPtr := &cmpStruct{
- Field1: []string{"1"},
- }
- a4FilledPtr := &cmpStruct{
- Field1: []string{"2"},
- }
- f(a4PartialPtr, a4FilledPtr, `{"spec.field1[0]":{"--":"2","++":"1"}}`)
+ // desired slice is empty
+ f(opts{
+ desired: &cmpStruct{},
+ current: &cmpStruct{
+ Field1: []string{"1"},
+ },
+ expected: `{"spec.field1":{"--":["1"]}}`,
+ expectedDerivative: `{}`,
+ })
- a5PartialPtr := &cmpStruct{
- Field1: []string{"1", "2"},
- }
- a5FilledPtr := &cmpStruct{
- Field1: []string{"2"},
- }
- f(a5PartialPtr, a5FilledPtr, `{"spec.field1[0]":{"++":"1"}}`)
+ // desired struct is nil
+ f(opts{
+ current: &cmpStruct{
+ Field1: []string{"1"},
+ },
+ expected: `{"spec":{"--":{"field1":["1"],"Field3":0,"Field5":""}}}`,
+ expectedDerivative: `{}`,
+ })
- a6PartialPtr := &cmpStruct{
- Field1: []string{"1", "2"},
- }
- a6FilledPtr := &cmpStruct{
- Field1: []string{"1"},
- }
- f(a6PartialPtr, a6FilledPtr, `{"spec.field1[1]":{"++":"2"}}`)
+ // equal numbers
+ f(opts{
+ desired: &cmpStruct{
+ Field3: 5,
+ },
+ current: &cmpStruct{
+ Field3: 5,
+ },
+ expected: `{}`,
+ expectedDerivative: `{}`,
+ })
+
+ // different numbers
+ f(opts{
+ desired: &cmpStruct{
+ Field3: 5,
+ },
+ current: &cmpStruct{
+ Field3: 6,
+ },
+ expected: `{"spec.Field3":{"--":6,"++":5}}`,
+ expectedDerivative: `{"spec.Field3":{"--":6,"++":5}}`,
+ })
+
+ // different strings
+ f(opts{
+ desired: &cmpStruct{
+ Field5: "new",
+ },
+ current: &cmpStruct{
+ Field5: "line",
+ },
+ expected: `{"spec.Field5":{"--":"line","++":"new"}}`,
+ expectedDerivative: `{"spec.Field5":{"--":"line","++":"new"}}`,
+ })
+
+ // slice and number
+ f(opts{
+ desired: &cmpStruct{},
+ current: &cmpStruct{
+ Field1: make([]string, 0),
+ Field3: 5,
+ },
+ expected: `{"spec.Field3":{"--":5,"++":0}}`,
+ expectedDerivative: `{"spec.Field3":{"--":5,"++":0}}`,
+ })
+
+ // map and number
+ f(opts{
+ desired: &cmpStruct{
+ Field2: make(map[string]int),
+ Field3: 10,
+ },
+ current: &cmpStruct{},
+ expected: `{"spec.Field3":{"--":0,"++":10}}`,
+ expectedDerivative: `{"spec.Field3":{"--":0,"++":10}}`,
+ })
+
+ // nil slice with 1 element slice
+ f(opts{
+ desired: &cmpStruct{
+ Field1: []string{"v1"},
+ },
+ current: &cmpStruct{},
+ expected: `{"spec.field1":{"++":["v1"]}}`,
+ expectedDerivative: `{"spec.field1":{"++":["v1"]}}`,
+ })
+
+ // nil slice with 2 element slice
+ f(opts{
+ desired: &cmpStruct{
+ Field1: []string{"1", "2"},
+ },
+ current: &cmpStruct{},
+ expected: `{"spec.field1":{"++":["1","2"]}}`,
+ expectedDerivative: `{"spec.field1":{"++":["1","2"]}}`,
+ })
+
+ // different non-empty slices, desired slice is smaller
+ f(opts{
+ desired: &cmpStruct{
+ Field1: []string{"1"},
+ },
+ current: &cmpStruct{
+ Field1: []string{"1", "2"},
+ },
+ expected: `{"spec.field1[1]":{"--":"2"}}`,
+ expectedDerivative: `{"spec.field1[1]":{"--":"2"}}`,
+ })
+
+ // different non-empty slices with same length
+ f(opts{
+ desired: &cmpStruct{
+ Field1: []string{"1"},
+ },
+ current: &cmpStruct{
+ Field1: []string{"2"},
+ },
+ expected: `{"spec.field1[0]":{"--":"2","++":"1"}}`,
+ expectedDerivative: `{"spec.field1[0]":{"--":"2","++":"1"}}`,
+ })
+
+ // different non-empty slices, desired slice is bigger
+ f(opts{
+ desired: &cmpStruct{
+ Field1: []string{"1", "2"},
+ },
+ current: &cmpStruct{
+ Field1: []string{"2"},
+ },
+ expected: `{"spec.field1[0]":{"++":"1"}}`,
+ expectedDerivative: `{"spec.field1[0]":{"++":"1"}}`,
+ })
+
+	// swapped string keys of map
+ f(opts{
+ desired: &cmpStruct{
+ Field2: map[string]int{
+ "1": 1,
+ "2": 2,
+ },
+ },
+ current: &cmpStruct{
+ Field2: map[string]int{
+ "1": 2,
+ "2": 1,
+ },
+ },
+ expected: `{"spec.field2['1']":{"--":2,"++":1},"spec.field2['2']":{"--":1,"++":2}}`,
+ expectedDerivative: `{"spec.field2['1']":{"--":2,"++":1},"spec.field2['2']":{"--":1,"++":2}}`,
+ })
+
+	// swapped int keys of map
+ f(opts{
+ desired: &cmpStruct{
+ Field4: map[int]int{
+ 1: 1,
+ 2: 2,
+ },
+ },
+ current: &cmpStruct{
+ Field4: map[int]int{
+ 1: 2,
+ 2: 1,
+ },
+ },
+ expected: `{"spec.field4[1]":{"--":2,"++":1},"spec.field4[2]":{"--":1,"++":2}}`,
+ expectedDerivative: `{"spec.field4[1]":{"--":2,"++":1},"spec.field4[2]":{"--":1,"++":2}}`,
+ })
}
diff --git a/internal/controller/operator/factory/reconcile/statefulset.go b/internal/controller/operator/factory/reconcile/statefulset.go
index e98c32252..ebcc679d3 100644
--- a/internal/controller/operator/factory/reconcile/statefulset.go
+++ b/internal/controller/operator/factory/reconcile/statefulset.go
@@ -19,9 +19,11 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
+ "github.com/VictoriaMetrics/operator/internal/controller/operator/factory/finalize"
"github.com/VictoriaMetrics/operator/internal/controller/operator/factory/logger"
)
@@ -71,7 +73,7 @@ func StatefulSet(ctx context.Context, rclient client.Client, cr STSOptions, newO
if err := validateStatefulSet(newObj); err != nil {
return err
}
- var mustRecreatePod bool
+ var recreatePod bool
var prevMeta *metav1.ObjectMeta
var prevTemplateAnnotations map[string]string
var prevVCTs []corev1.PersistentVolumeClaim
@@ -84,7 +86,6 @@ func StatefulSet(ctx context.Context, rclient client.Client, cr STSOptions, newO
rclient.Scheme().Default(newObj)
updateStrategy := newObj.Spec.UpdateStrategy.Type
nsn := types.NamespacedName{Name: newObj.Name, Namespace: newObj.Namespace}
- var recreateSTS func() error
removeFinalizer := true
err := retryOnConflict(func() error {
var existingObj appsv1.StatefulSet
@@ -115,14 +116,28 @@ func StatefulSet(ctx context.Context, rclient client.Client, cr STSOptions, newO
spec.Replicas = existingObj.Spec.Replicas
}
- var mustRecreateSTS bool
- mustRecreateSTS, mustRecreatePod = isSTSRecreateRequired(ctx, &existingObj, newObj, prevVCTs)
+ mustRecreateSTS, mustRecreatePod := isSTSRecreateRequired(ctx, &existingObj, newObj, prevVCTs)
if mustRecreateSTS {
- recreateSTS = func() error {
- return removeStatefulSetKeepPods(ctx, rclient, &existingObj, newObj)
+ recreatePod = mustRecreatePod
+ if err := finalize.RemoveFinalizer(ctx, rclient, &existingObj); err != nil {
+ return fmt.Errorf("failed to remove finalizer from StatefulSet=%s: %w", nsn.String(), err)
+ }
+ opts := client.DeleteOptions{
+ PropagationPolicy: ptr.To(metav1.DeletePropagationOrphan),
+ }
+ if err := rclient.Delete(ctx, &existingObj, &opts); err != nil {
+ return err
+ }
+ return newErrRecreate(ctx, &existingObj)
+ }
+
+ // check if pvcs need to resize
+ if cr.HasClaim {
+ if err := updateSTSPVC(ctx, rclient, &existingObj, prevVCTs); err != nil {
+ return err
}
- return nil
}
+
metaChanged, err := mergeMeta(&existingObj, newObj, prevMeta, owner, removeFinalizer)
if err != nil {
return err
@@ -139,27 +154,17 @@ func StatefulSet(ctx context.Context, rclient client.Client, cr STSOptions, newO
if err := rclient.Update(ctx, &existingObj); err != nil {
return fmt.Errorf("cannot perform update on StatefulSet=%s: %w", nsn.String(), err)
}
- // check if pvcs need to resize
- if cr.HasClaim {
- return updateSTSPVC(ctx, rclient, &existingObj, owner)
- }
return nil
})
if err != nil {
return err
}
- if recreateSTS != nil {
- if err = recreateSTS(); err != nil {
- return err
- }
- }
-
// perform manual update only with OnDelete policy, which is default.
switch updateStrategy {
case appsv1.OnDeleteStatefulSetStrategyType:
opts := rollingUpdateOpts{
- recreate: mustRecreatePod,
+ recreate: recreatePod,
selector: cr.SelectorLabels(),
maxUnavailable: 1,
}
diff --git a/internal/controller/operator/factory/reconcile/statefulset_pvc_expand.go b/internal/controller/operator/factory/reconcile/statefulset_pvc_expand.go
index 17e62ac1a..a438b12cb 100644
--- a/internal/controller/operator/factory/reconcile/statefulset_pvc_expand.go
+++ b/internal/controller/operator/factory/reconcile/statefulset_pvc_expand.go
@@ -5,23 +5,19 @@ import (
"fmt"
"sort"
"strings"
- "time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/equality"
- k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
"github.com/VictoriaMetrics/operator/internal/config"
- "github.com/VictoriaMetrics/operator/internal/controller/operator/factory/finalize"
"github.com/VictoriaMetrics/operator/internal/controller/operator/factory/logger"
)
@@ -57,7 +53,7 @@ func isSTSRecreateRequired(ctx context.Context, existingObj, newObj *appsv1.Stat
return true, true
}
prevVCT := getPVCByName(prevVCTs, newVCT.Name)
- if immutableVCTFieldsChanged(ctx, existingVCT, &newVCT, prevVCT) {
+ if changedVCTFields(ctx, existingVCT, &newVCT, prevVCT) {
l.Info(fmt.Sprintf("VolumeClaimTemplate=%s have some changes, recreating StatefulSet", newVCT.Name))
vctChanged = true
}
@@ -66,7 +62,7 @@ func isSTSRecreateRequired(ctx context.Context, existingObj, newObj *appsv1.Stat
if vctChanged {
return true, false
}
- if immutableSTSFieldsChanged(ctx, existingObj, newObj) {
+ if changedImmutableSTSFields(ctx, existingObj, newObj) {
return true, false
}
@@ -82,7 +78,7 @@ func getPVCByName(vcts []corev1.PersistentVolumeClaim, name string) *corev1.Pers
return nil
}
-func updateSTSPVC(ctx context.Context, rclient client.Client, sts *appsv1.StatefulSet, owner *metav1.OwnerReference) error {
+func updateSTSPVC(ctx context.Context, rclient client.Client, sts *appsv1.StatefulSet, prevVCTs []corev1.PersistentVolumeClaim) error {
// fast path
if sts.Spec.Replicas != nil && *sts.Spec.Replicas == 0 {
return nil
@@ -120,8 +116,9 @@ func updateSTSPVC(ctx context.Context, rclient client.Client, sts *appsv1.Statef
l.Info(fmt.Sprintf("possible BUG, cannot find target PVC=%s in new statefulset by claim name=%s", pvc.Name, stsClaimName))
continue
}
+ prevVCT := getPVCByName(prevVCTs, stsClaimName)
// update PVC size and metadata if it's needed
- if err := updatePVC(ctx, rclient, &pvc, &stsClaim, nil, owner); err != nil {
+ if err := updatePVC(ctx, rclient, &pvc, &stsClaim, prevVCT, nil); err != nil {
return err
}
}
@@ -237,25 +234,13 @@ func isStorageClassExpandable(ctx context.Context, rclient client.Client, pvc *c
return false, nil
}
-func immutableVCTFieldsChanged(ctx context.Context, existingObj, newObj, prevObj *corev1.PersistentVolumeClaim) bool {
+func changedVCTFields(ctx context.Context, existingObj, newObj, prevObj *corev1.PersistentVolumeClaim) bool {
if existingObj == nil && newObj == nil {
return false
}
if existingObj == nil || newObj == nil {
return true
}
- newSize := newObj.Spec.Resources.Requests.Storage()
- existingSize := existingObj.Spec.Resources.Requests.Storage()
- nsn := types.NamespacedName{Name: existingObj.Name, Namespace: existingObj.Namespace}
- if newSize.Cmp(*existingSize) < 0 {
- logger.WithContext(ctx).Info(fmt.Sprintf("must recreate STS, its PVC=%s claim size=%s was decreased to %s", nsn.String(), existingSize.String(), newSize.String()))
- return true
- }
- newSpec := newObj.Spec.DeepCopy()
- if existingSize != nil {
- newSpec.Resources.Requests[corev1.ResourceStorage] = *existingSize
- }
-
var prevLabels, prevAnnotations map[string]string
if prevObj != nil {
prevLabels = prevObj.Labels
@@ -265,20 +250,21 @@ func immutableVCTFieldsChanged(ctx context.Context, existingObj, newObj, prevObj
newLabels := mergeMaps(existingObj.Labels, newObj.Labels, prevLabels)
isEqual := equality.Semantic.DeepEqual(existingObj.Labels, newLabels) &&
equality.Semantic.DeepEqual(existingObj.Annotations, newAnnotations)
- specDiff := diffDeepDerivative(newSpec, &existingObj.Spec, "spec")
+ specDiff := diffDeepDerivative(newObj.Spec, existingObj.Spec, "spec")
isEqual = isEqual && len(specDiff) == 0
if isEqual {
return false
}
+ nsn := types.NamespacedName{Name: existingObj.Name, Namespace: existingObj.Namespace}
logger.WithContext(ctx).Info(fmt.Sprintf("changes detected for PVC=%s", nsn.String()), "spec_diff", specDiff)
return true
}
-// immutableSTSFieldsChanged checks if immutable newObj fields were changed
+// changedImmutableSTSFields checks if immutable newObj fields were changed
//
// logic was borrowed from
// https://github.com/kubernetes/kubernetes/blob/a866cbe2e5bbaa01cfd5e969aa3e033f3282a8a2/pkg/apis/apps/validation/validation.go#L166
-func immutableSTSFieldsChanged(ctx context.Context, existingObj, newObj *appsv1.StatefulSet) bool {
+func changedImmutableSTSFields(ctx context.Context, existingObj, newObj *appsv1.StatefulSet) bool {
// statefulset updates aren't super common and general updates are likely to be touching spec, so we'll do this
// deep copy right away. This avoids mutating our inputs
newObjClone := newObj.DeepCopy()
@@ -301,40 +287,3 @@ func immutableSTSFieldsChanged(ctx context.Context, existingObj, newObj *appsv1.
logger.WithContext(ctx).Info(fmt.Sprintf("immutable StatefulSet=%s field changed, spec_diff=%v", nsn.String(), specDiff))
return true
}
-
-func removeStatefulSetKeepPods(ctx context.Context, rclient client.Client, existingObj, newObj *appsv1.StatefulSet) error {
- // removes finalizer from exist sts, it allows to delete it
- nsn := types.NamespacedName{Name: existingObj.Name, Namespace: existingObj.Namespace}
- if err := finalize.RemoveFinalizer(ctx, rclient, existingObj); err != nil {
- return fmt.Errorf("failed to remove finalizer from StatefulSet=%s: %w", nsn.String(), err)
- }
- opts := client.DeleteOptions{
- PropagationPolicy: ptr.To(metav1.DeletePropagationOrphan),
- }
- if err := rclient.Delete(ctx, existingObj, &opts); err != nil {
- return err
- }
-
- // wait until sts disappears
- if err := wait.PollUntilContextTimeout(ctx, time.Second, time.Second*30, true, func(ctx context.Context) (done bool, err error) {
- if err := rclient.Get(ctx, nsn, &appsv1.StatefulSet{}); err != nil {
- if k8serrors.IsNotFound(err) {
- return true, nil
- }
- return false, fmt.Errorf("unexpected error for polling, want notFound, got: %w", err)
- }
- return false, nil
- }); err != nil {
- return fmt.Errorf("cannot wait for StatefulSet=%s to be deleted: %w", nsn.String(), err)
- }
-
- if err := rclient.Create(ctx, newObj); err != nil {
- // try to restore previous one and throw error
- existingObj.ResourceVersion = ""
- if err2 := rclient.Create(ctx, existingObj); err2 != nil {
- return fmt.Errorf("cannot restore previous StatefulSet=%s configuration after remove original error: %s: restore error %w", nsn.String(), err, err2)
- }
- return fmt.Errorf("cannot create new StatefulSet=%s instead of replaced, perform manual action to handle this error or report BUG: %w", nsn.String(), err)
- }
- return nil
-}
diff --git a/internal/controller/operator/factory/reconcile/statefulset_pvc_expand_test.go b/internal/controller/operator/factory/reconcile/statefulset_pvc_expand_test.go
index db4ebc518..6014a529c 100644
--- a/internal/controller/operator/factory/reconcile/statefulset_pvc_expand_test.go
+++ b/internal/controller/operator/factory/reconcile/statefulset_pvc_expand_test.go
@@ -23,7 +23,6 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
type opts struct {
newObj *appsv1.StatefulSet
existingObj *appsv1.StatefulSet
- validate func(sts *appsv1.StatefulSet)
mustRecreateSTS bool
mustRecreatePod bool
}
@@ -48,17 +47,9 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
t.Helper()
ctx := context.TODO()
mustRecreateSTS, mustRecreatePod := isSTSRecreateRequired(ctx, existingObj, newObj, nil)
- if mustRecreateSTS {
- assert.NoError(t, removeStatefulSetKeepPods(ctx, cl, existingObj, newObj))
- } else {
- updatedSTS := existingObj.DeepCopy()
- updatedSTS.Spec = newObj.Spec
- assert.NoError(t, cl.Update(ctx, updatedSTS))
- }
var updatedSTS appsv1.StatefulSet
nsn := types.NamespacedName{Namespace: newObj.Namespace, Name: newObj.Name}
assert.NoError(t, cl.Get(ctx, nsn, &updatedSTS))
- o.validate(&updatedSTS)
assert.Equal(t, mustRecreateSTS, o.mustRecreateSTS)
assert.Equal(t, mustRecreatePod, o.mustRecreatePod)
}
@@ -82,37 +73,24 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
// create sts with a new claim
f(opts{
- existingObj: buildSTS(),
- newObj: buildSTS(setVCT("new-claim", "", "")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- },
+ existingObj: buildSTS(),
+ newObj: buildSTS(setVCT("new-claim", "", "")),
mustRecreateSTS: true,
mustRecreatePod: true,
})
// resize claim at sts
f(opts{
- existingObj: buildSTS(setVCT("new-claim", "10Gi", "")),
- newObj: buildSTS(setVCT("new-claim", "15Gi", "")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- sz := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
- assert.Equal(t, sz, "15Gi")
- },
- mustRecreateSTS: false,
+ existingObj: buildSTS(setVCT("new-claim", "10Gi", "")),
+ newObj: buildSTS(setVCT("new-claim", "15Gi", "")),
+ mustRecreateSTS: true,
mustRecreatePod: false,
})
// downsize claim
f(opts{
- existingObj: buildSTS(setVCT("new-claim", "15Gi", "")),
- newObj: buildSTS(setVCT("new-claim", "10Gi", "")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- sz := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String()
- assert.Equal(t, sz, "10Gi")
- },
+ existingObj: buildSTS(setVCT("new-claim", "15Gi", "")),
+ newObj: buildSTS(setVCT("new-claim", "10Gi", "")),
mustRecreateSTS: true,
mustRecreatePod: false,
})
@@ -123,24 +101,14 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
newObj: buildSTS(setVCT("new-claim", "10Gi", "old-sc"), func(sts *appsv1.StatefulSet) {
sts.Spec.VolumeClaimTemplates[0].Labels = map[string]string{"test": "test"}
}),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- labels := sts.Spec.VolumeClaimTemplates[0].Labels
- assert.Equal(t, labels["test"], "test")
- },
mustRecreateSTS: true,
mustRecreatePod: false,
})
// change claim storageClass name
f(opts{
- existingObj: buildSTS(setVCT("new-claim", "10Gi", "old-sc")),
- newObj: buildSTS(setVCT("new-claim", "10Gi", "new-sc")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- name := *sts.Spec.VolumeClaimTemplates[0].Spec.StorageClassName
- assert.Equal(t, name, "new-sc")
- },
+ existingObj: buildSTS(setVCT("new-claim", "10Gi", "old-sc")),
+ newObj: buildSTS(setVCT("new-claim", "10Gi", "new-sc")),
mustRecreateSTS: true,
mustRecreatePod: false,
})
@@ -153,11 +121,8 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
}
}
f(opts{
- existingObj: buildSTS(setService("old-service")),
- newObj: buildSTS(setService("new-service")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Equal(t, sts.Spec.ServiceName, "new-service")
- },
+ existingObj: buildSTS(setService("old-service")),
+ newObj: buildSTS(setService("new-service")),
mustRecreateSTS: true,
mustRecreatePod: true,
})
@@ -169,23 +134,16 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
}
}
f(opts{
- existingObj: buildSTS(setPolicy(appsv1.OrderedReadyPodManagement)),
- newObj: buildSTS(setPolicy(appsv1.ParallelPodManagement)),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Equal(t, sts.Spec.PodManagementPolicy, appsv1.ParallelPodManagement)
- },
+ existingObj: buildSTS(setPolicy(appsv1.OrderedReadyPodManagement)),
+ newObj: buildSTS(setPolicy(appsv1.ParallelPodManagement)),
mustRecreateSTS: true,
mustRecreatePod: false,
})
// rename claim
f(opts{
- existingObj: buildSTS(setVCT("old-claim", "", "")),
- newObj: buildSTS(setVCT("new-claim", "", "")),
- validate: func(sts *appsv1.StatefulSet) {
- assert.Len(t, sts.Spec.VolumeClaimTemplates, 1)
- assert.Equal(t, "new-claim", sts.Spec.VolumeClaimTemplates[0].Name)
- },
+ existingObj: buildSTS(setVCT("old-claim", "", "")),
+ newObj: buildSTS(setVCT("new-claim", "", "")),
mustRecreateSTS: true,
mustRecreatePod: true,
})
@@ -194,6 +152,7 @@ func Test_recreateOrUpdateSTS(t *testing.T) {
func Test_updateSTSPVC(t *testing.T) {
type opts struct {
sts *appsv1.StatefulSet
+ prevVCTs []corev1.PersistentVolumeClaim
wantErr bool
preRun func(c client.Client)
expected []corev1.PersistentVolumeClaim
@@ -207,7 +166,7 @@ func Test_updateSTSPVC(t *testing.T) {
o.preRun(cl.Client)
cl.Actions = nil
}
- err := updateSTSPVC(ctx, cl, o.sts, nil)
+ err := updateSTSPVC(ctx, cl, o.sts, o.prevVCTs)
if o.wantErr {
assert.Error(t, err)
} else {
@@ -247,6 +206,107 @@ func Test_updateSTSPVC(t *testing.T) {
pvc1NSN := types.NamespacedName{Name: "vmselect-cachedir-vmselect-0", Namespace: "default"}
pvc2NSN := types.NamespacedName{Name: "test-vmselect-0", Namespace: "default"}
+ // do not update if only third-party metadata differs
+ f(opts{
+ sts: buildSTS(func(sts *appsv1.StatefulSet) {
+ sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ },
+ Labels: map[string]string{"app": "vmselect"},
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ },
+ }
+ }),
+ prevVCTs: []corev1.PersistentVolumeClaim{{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ },
+ Labels: map[string]string{
+ "app": "vmselect",
+ },
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ }},
+ preRun: func(c client.Client) {
+ assert.NoError(t, c.Create(context.TODO(), &corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: pvc1NSN.Name,
+ Namespace: pvc1NSN.Namespace,
+ Labels: map[string]string{
+ "app": "vmselect",
+ "3rd-party": "value",
+ },
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ "3rd-party": "value",
+ },
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ }))
+ assert.NoError(t, c.Create(context.TODO(), &storagev1.StorageClass{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "standard",
+ Annotations: map[string]string{
+ "volume.beta.kubernetes.io/storage-class": "true",
+ },
+ },
+ }))
+ },
+ expected: []corev1.PersistentVolumeClaim{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: pvc1NSN.Name,
+ Namespace: pvc1NSN.Namespace,
+ Labels: map[string]string{
+ "app": "vmselect",
+ "3rd-party": "value",
+ },
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ "3rd-party": "value",
+ },
+ ResourceVersion: "1",
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ },
+ },
+ })
+
// update metadata only
f(opts{
sts: buildSTS(func(sts *appsv1.StatefulSet) {
@@ -270,15 +330,38 @@ func Test_updateSTSPVC(t *testing.T) {
},
}
}),
+ prevVCTs: []corev1.PersistentVolumeClaim{{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "after",
+ },
+ Labels: map[string]string{
+ "app": "vmselect",
+ },
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ }},
preRun: func(c client.Client) {
assert.NoError(t, c.Create(context.TODO(), &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvc1NSN.Name,
Namespace: pvc1NSN.Namespace,
- Labels: map[string]string{"app": "vmselect"},
+ Labels: map[string]string{
+ "app": "vmselect",
+ "3rd-party": "value",
+ },
Annotations: map[string]string{
"operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
- "test": "before",
+ "test": "before",
+ "3rd-party": "value",
},
},
Spec: corev1.PersistentVolumeClaimSpec{
@@ -306,10 +389,14 @@ func Test_updateSTSPVC(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: pvc1NSN.Name,
Namespace: pvc1NSN.Namespace,
- Labels: map[string]string{"app": "vmselect"},
+ Labels: map[string]string{
+ "app": "vmselect",
+ "3rd-party": "value",
+ },
Annotations: map[string]string{
"operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
- "test": "after",
+ "test": "after",
+ "3rd-party": "value",
},
ResourceVersion: "2",
},
diff --git a/internal/controller/operator/factory/reconcile/statefulset_test.go b/internal/controller/operator/factory/reconcile/statefulset_test.go
index dedcf3448..acd910ea0 100644
--- a/internal/controller/operator/factory/reconcile/statefulset_test.go
+++ b/internal/controller/operator/factory/reconcile/statefulset_test.go
@@ -11,6 +11,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -842,6 +843,81 @@ func TestStatefulsetReconcile(t *testing.T) {
},
})
+ // recreate on VCT size change
+ f(opts{
+ new: getSts(func(s *appsv1.StatefulSet) {
+ s.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ },
+ Labels: map[string]string{"app": "vmselect"},
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ }}
+ }),
+ prev: getSts(func(s *appsv1.StatefulSet) {
+ s.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ },
+ Labels: map[string]string{"app": "vmselect"},
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("5Gi"),
+ },
+ },
+ },
+ }}
+ }),
+ predefinedObjects: []runtime.Object{
+ getSts(func(s *appsv1.StatefulSet) {
+ s.Finalizers = []string{vmv1beta1.FinalizerName}
+ s.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vmselect-cachedir",
+ Annotations: map[string]string{
+ "operator.victoriametrics.com/pvc-allow-volume-expansion": "true",
+ "test": "test",
+ },
+ Labels: map[string]string{"app": "vmselect"},
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("5Gi"),
+ },
+ },
+ },
+ }}
+ }),
+ },
+ validate: func(s *appsv1.StatefulSet) {
+ assert.Equal(t, "10Gi", s.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests.Storage().String())
+ },
+ actions: []k8stools.ClientAction{
+ {Verb: "Get", Kind: "StatefulSet", Resource: nn},
+ {Verb: "Patch", Kind: "StatefulSet", Resource: nn},
+ {Verb: "Delete", Kind: "StatefulSet", Resource: nn},
+ {Verb: "Get", Kind: "StatefulSet", Resource: nn},
+ {Verb: "Create", Kind: "StatefulSet", Resource: nn},
+ {Verb: "Get", Kind: "StatefulSet", Resource: nn},
+ },
+ })
+
// context deadline error
f(opts{
new: getSts(func(s *appsv1.StatefulSet) {
diff --git a/internal/controller/operator/factory/vlagent/vlagent.go b/internal/controller/operator/factory/vlagent/vlagent.go
index 458475ae1..a02aaad1c 100644
--- a/internal/controller/operator/factory/vlagent/vlagent.go
+++ b/internal/controller/operator/factory/vlagent/vlagent.go
@@ -544,7 +544,8 @@ func buildRemoteWriteArgs(cr *vmv1.VLAgent) ([]string, error) {
var storageLimit int64
if cr.Spec.Storage != nil {
- if storage, ok := cr.Spec.Storage.VolumeClaimTemplate.Spec.Resources.Requests[corev1.ResourceStorage]; ok {
+ storage := cr.Spec.Storage.VolumeClaimTemplate.Spec.Resources.Requests.Storage()
+ if !storage.IsZero() {
storageInt, ok := storage.AsInt64()
if ok {
storageLimit = storageInt
diff --git a/internal/controller/operator/factory/vlsingle/vlsingle.go b/internal/controller/operator/factory/vlsingle/vlsingle.go
index c32eb7d14..90248fba4 100644
--- a/internal/controller/operator/factory/vlsingle/vlsingle.go
+++ b/internal/controller/operator/factory/vlsingle/vlsingle.go
@@ -25,14 +25,18 @@ import (
)
const (
- dataDataDir = "/victoria-logs-data"
+ dataDir = "/victoria-logs-data"
tlsServerConfigMountPath = "/etc/vm/tls-server-secrets"
)
+func isStorageEmpty(pvc *corev1.PersistentVolumeClaimSpec) bool {
+ return pvc == nil || pvc.Resources.Requests.Storage().IsZero()
+}
+
func createOrUpdatePVC(ctx context.Context, rclient client.Client, cr, prevCR *vmv1.VLSingle) error {
newPvc := newPVC(cr)
var prevPVC *corev1.PersistentVolumeClaim
- if prevCR != nil && prevCR.Spec.Storage != nil {
+ if prevCR != nil && !isStorageEmpty(prevCR.Spec.Storage) {
prevPVC = newPVC(prevCR)
}
owner := cr.AsOwner()
@@ -69,7 +73,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VLSingl
return err
}
}
- if cr.Spec.Storage != nil {
+ if !isStorageEmpty(cr.Spec.Storage) {
if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil {
return err
}
@@ -147,7 +151,7 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) {
args = append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes))
}
- storagePath := dataDataDir
+ storagePath := dataDir
if r.Spec.StorageDataPath != "" {
storagePath = r.Spec.StorageDataPath
}
@@ -183,7 +187,7 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) {
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal})
var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
- if r.Spec.Storage != nil {
+ if !isStorageEmpty(r.Spec.Storage) {
pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: r.PrefixedName(),
}
diff --git a/internal/controller/operator/factory/vmdistributed/vmauth.go b/internal/controller/operator/factory/vmdistributed/vmauth.go
index 99dc28a13..da9050ecb 100644
--- a/internal/controller/operator/factory/vmdistributed/vmauth.go
+++ b/internal/controller/operator/factory/vmdistributed/vmauth.go
@@ -1,8 +1,8 @@
package vmdistributed
import (
- "cmp"
"slices"
+ "sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -11,7 +11,8 @@ import (
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
)
-func appendVMClusterTargetRefs(targetRefs []vmv1beta1.TargetRef, vmClusters []*vmv1beta1.VMCluster, excludeIds ...int) []vmv1beta1.TargetRef {
+func vmClusterTargetRef(vmClusters []*vmv1beta1.VMCluster, excludeIds ...int) vmv1beta1.TargetRef {
+ var urls []string
for i := range vmClusters {
if slices.Contains(excludeIds, i) {
continue
@@ -20,23 +21,23 @@ func appendVMClusterTargetRefs(targetRefs []vmv1beta1.TargetRef, vmClusters []*v
if vmCluster.CreationTimestamp.IsZero() {
continue
}
- targetRefs = append(targetRefs, vmv1beta1.TargetRef{
- URLMapCommon: vmv1beta1.URLMapCommon{
- LoadBalancingPolicy: ptr.To("first_available"),
- RetryStatusCodes: []int{500, 502, 503},
- },
- Paths: []string{"/select/.+", "/admin/tenants"},
- CRD: &vmv1beta1.CRDRef{
- Kind: "VMCluster/vmselect",
- Name: vmCluster.Name,
- Namespace: vmCluster.Namespace,
- },
- })
+ urls = append(urls, vmCluster.AsURL(vmv1beta1.ClusterComponentSelect))
+ }
+ sort.Strings(urls)
+ return vmv1beta1.TargetRef{
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("first_available"),
+ RetryStatusCodes: []int{500, 502, 503},
+ },
+ Paths: []string{"/select/.+", "/admin/tenants"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: urls,
+ },
}
- return targetRefs
}
-func appendVMAgentTargetRefs(targetRefs []vmv1beta1.TargetRef, vmAgents []*vmv1beta1.VMAgent, excludeIds ...int) []vmv1beta1.TargetRef {
+func vmAgentTargetRef(vmAgents []*vmv1beta1.VMAgent, excludeIds ...int) vmv1beta1.TargetRef {
+ var urls []string
for i := range vmAgents {
if slices.Contains(excludeIds, i) {
continue
@@ -45,20 +46,19 @@ func appendVMAgentTargetRefs(targetRefs []vmv1beta1.TargetRef, vmAgents []*vmv1b
if vmAgent.CreationTimestamp.IsZero() {
continue
}
- targetRefs = append(targetRefs, vmv1beta1.TargetRef{
- URLMapCommon: vmv1beta1.URLMapCommon{
- LoadBalancingPolicy: ptr.To("first_available"),
- RetryStatusCodes: []int{500, 502, 503},
- },
- Paths: []string{"/insert/.+", "/api/v1/write"},
- CRD: &vmv1beta1.CRDRef{
- Kind: "VMAgent",
- Name: vmAgent.Name,
- Namespace: vmAgent.Namespace,
- },
- })
+ urls = append(urls, vmAgent.AsURL())
+ }
+ sort.Strings(urls)
+ return vmv1beta1.TargetRef{
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("first_available"),
+ RetryStatusCodes: []int{500, 502, 503},
+ },
+ Paths: []string{"/insert/.+", "/api/v1/write"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: urls,
+ },
}
- return targetRefs
}
func buildVMAuthLB(cr *vmv1alpha1.VMDistributed, vmAgents []*vmv1beta1.VMAgent, vmClusters []*vmv1beta1.VMCluster, excludeIds ...int) *vmv1beta1.VMAuth {
@@ -77,14 +77,8 @@ func buildVMAuthLB(cr *vmv1alpha1.VMDistributed, vmAgents []*vmv1beta1.VMAgent,
vmAuth.Spec.UnauthorizedUserAccessSpec = &vmv1beta1.VMAuthUnauthorizedUserAccessSpec{}
}
var targetRefs []vmv1beta1.TargetRef
- targetRefs = appendVMClusterTargetRefs(targetRefs, vmClusters, excludeIds...)
- targetRefs = appendVMAgentTargetRefs(targetRefs, vmAgents, excludeIds...)
- slices.SortFunc(targetRefs, func(a, b vmv1beta1.TargetRef) int {
- return cmp.Or(
- cmp.Compare(a.CRD.Kind, b.CRD.Kind),
- cmp.Compare(a.CRD.Name, b.CRD.Name),
- )
- })
+ targetRefs = append(targetRefs, vmAgentTargetRef(vmAgents, excludeIds...))
+ targetRefs = append(targetRefs, vmClusterTargetRef(vmClusters, excludeIds...))
vmAuth.Spec.UnauthorizedUserAccessSpec.URLMap = nil
vmAuth.Spec.UnauthorizedUserAccessSpec.URLPrefix = nil
vmAuth.Spec.UnauthorizedUserAccessSpec.TargetRefs = targetRefs
diff --git a/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go b/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
index 0b6106567..52ec68a6b 100644
--- a/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
+++ b/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
@@ -343,8 +343,8 @@ func TestCreateOrUpdate(t *testing.T) {
},
}
var targetRefs []vmv1beta1.TargetRef
- targetRefs = appendVMAgentTargetRefs(targetRefs, d.zones.vmagents)
- targetRefs = appendVMClusterTargetRefs(targetRefs, []*vmv1beta1.VMCluster{d.zones.vmclusters[0]})
+ targetRefs = append(targetRefs, vmAgentTargetRef(d.zones.vmagents))
+ targetRefs = append(targetRefs, vmClusterTargetRef([]*vmv1beta1.VMCluster{d.zones.vmclusters[0]}))
lb.Spec.UnauthorizedUserAccessSpec = &vmv1beta1.VMAuthUnauthorizedUserAccessSpec{
TargetRefs: targetRefs,
}
@@ -405,27 +405,31 @@ func TestCreateOrUpdate(t *testing.T) {
vmAuth := buildVMAuthLB(d.cr, d.zones.vmagents, d.zones.vmclusters)
owner := d.cr.AsOwner()
assert.NoError(t, reconcile.VMAuth(ctx, rclient, vmAuth, nil, &owner))
- targetRefs := make([]vmv1beta1.TargetRef, 6)
+ var vmClusterURLs, vmAgentURLs []string
+ for i := range d.zones.vmagents {
+ vmAgentURLs = append(vmAgentURLs, d.zones.vmagents[i].AsURL())
+ }
+ for i := range d.zones.vmclusters {
+ vmClusterURLs = append(vmClusterURLs, d.zones.vmclusters[i].AsURL(vmv1beta1.ClusterComponentSelect))
+ }
+ targetRefs := []vmv1beta1.TargetRef{
+ {
+ Paths: []string{"/insert/.+", "/api/v1/write"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: vmAgentURLs,
+ },
+ },
+ {
+ Paths: []string{"/select/.+", "/admin/tenants"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: vmClusterURLs,
+ },
+ },
+ }
for i := range targetRefs {
targetRef := &targetRefs[i]
targetRef.LoadBalancingPolicy = ptr.To("first_available")
targetRef.RetryStatusCodes = []int{500, 502, 503}
- if i < len(d.zones.vmagents) {
- targetRef.CRD = &vmv1beta1.CRDRef{
- Name: d.zones.vmagents[i].Name,
- Namespace: d.zones.vmagents[i].Namespace,
- Kind: "VMAgent",
- }
- targetRef.Paths = []string{"/insert/.+", "/api/v1/write"}
- } else {
- idx := i - len(d.zones.vmagents)
- targetRef.CRD = &vmv1beta1.CRDRef{
- Name: d.zones.vmclusters[idx].Name,
- Namespace: d.zones.vmclusters[idx].Namespace,
- Kind: "VMCluster/vmselect",
- }
- targetRef.Paths = []string{"/select/.+", "/admin/tenants"}
- }
}
var got vmv1beta1.VMAuth
nsn := types.NamespacedName{
@@ -534,27 +538,31 @@ func TestCreateOrUpdate(t *testing.T) {
vmAuth := buildVMAuthLB(d.cr, d.zones.vmagents, d.zones.vmclusters)
owner := d.cr.AsOwner()
assert.NoError(t, reconcile.VMAuth(ctx, rclient, vmAuth, nil, &owner))
- targetRefs := make([]vmv1beta1.TargetRef, 6)
+ var vmClusterURLs, vmAgentURLs []string
+ for i := range d.zones.vmagents {
+ vmAgentURLs = append(vmAgentURLs, d.zones.vmagents[i].AsURL())
+ }
+ for i := range d.zones.vmclusters {
+ vmClusterURLs = append(vmClusterURLs, d.zones.vmclusters[i].AsURL(vmv1beta1.ClusterComponentSelect))
+ }
+ targetRefs := []vmv1beta1.TargetRef{
+ {
+ Paths: []string{"/insert/.+", "/api/v1/write"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: vmAgentURLs,
+ },
+ },
+ {
+ Paths: []string{"/select/.+", "/admin/tenants"},
+ Static: &vmv1beta1.StaticRef{
+ URLs: vmClusterURLs,
+ },
+ },
+ }
for i := range targetRefs {
targetRef := &targetRefs[i]
targetRef.LoadBalancingPolicy = ptr.To("first_available")
targetRef.RetryStatusCodes = []int{500, 502, 503}
- if i < len(d.zones.vmagents) {
- targetRef.CRD = &vmv1beta1.CRDRef{
- Name: d.zones.vmagents[i].Name,
- Namespace: d.zones.vmagents[i].Namespace,
- Kind: "VMAgent",
- }
- targetRef.Paths = []string{"/insert/.+", "/api/v1/write"}
- } else {
- idx := i - len(d.zones.vmagents)
- targetRef.CRD = &vmv1beta1.CRDRef{
- Name: d.zones.vmclusters[idx].Name,
- Namespace: d.zones.vmclusters[idx].Namespace,
- Kind: "VMCluster/vmselect",
- }
- targetRef.Paths = []string{"/select/.+", "/admin/tenants"}
- }
}
var got vmv1beta1.VMAuth
nsn := types.NamespacedName{
diff --git a/internal/controller/operator/factory/vmsingle/vmsingle.go b/internal/controller/operator/factory/vmsingle/vmsingle.go
index ebbb2b243..9fe657e5b 100644
--- a/internal/controller/operator/factory/vmsingle/vmsingle.go
+++ b/internal/controller/operator/factory/vmsingle/vmsingle.go
@@ -25,14 +25,18 @@ import (
)
const (
- dataDataDir = "/victoria-metrics-data"
+ dataDir = "/victoria-metrics-data"
streamAggrSecretKey = "config.yaml"
)
+func isStorageEmpty(pvc *corev1.PersistentVolumeClaimSpec) bool {
+ return pvc == nil || pvc.Resources.Requests.Storage().IsZero()
+}
+
func createStorage(ctx context.Context, rclient client.Client, cr, prevCR *vmv1beta1.VMSingle) error {
newPvc := makePvc(cr)
var prevPVC *corev1.PersistentVolumeClaim
- if prevCR != nil && prevCR.Spec.Storage != nil {
+ if prevCR != nil && !isStorageEmpty(prevCR.Spec.Storage) {
prevPVC = makePvc(prevCR)
}
owner := cr.AsOwner()
@@ -84,7 +88,7 @@ func CreateOrUpdate(ctx context.Context, cr *vmv1beta1.VMSingle, rclient client.
}
}
- if cr.Spec.Storage != nil {
+ if !isStorageEmpty(cr.Spec.Storage) {
if err := createStorage(ctx, rclient, cr, prevCR); err != nil {
return fmt.Errorf("cannot create storage: %w", err)
}
@@ -146,7 +150,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
args = append(args, fmt.Sprintf("-retentionPeriod=%s", cr.Spec.RetentionPeriod))
}
- storagePath := dataDataDir
+ storagePath := dataDir
if cr.Spec.StorageDataPath != "" {
storagePath = cr.Spec.StorageDataPath
}
@@ -176,7 +180,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
ports = build.AppendInsertPorts(ports, cr.Spec.InsertPorts)
var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
- if cr.Spec.Storage != nil {
+ if !isStorageEmpty(cr.Spec.Storage) {
pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: cr.PrefixedName(),
}
diff --git a/internal/controller/operator/factory/vtsingle/vtsingle.go b/internal/controller/operator/factory/vtsingle/vtsingle.go
index 97c71a87c..7fe3a4ab9 100644
--- a/internal/controller/operator/factory/vtsingle/vtsingle.go
+++ b/internal/controller/operator/factory/vtsingle/vtsingle.go
@@ -25,14 +25,18 @@ import (
)
const (
- dataDataDir = "/victoria-traces-data"
+ dataDir = "/victoria-traces-data"
tlsServerConfigMountPath = "/etc/vm/tls-server-secrets"
)
+func isStorageEmpty(pvc *corev1.PersistentVolumeClaimSpec) bool {
+ return pvc == nil || pvc.Resources.Requests.Storage().IsZero()
+}
+
func createOrUpdatePVC(ctx context.Context, rclient client.Client, cr, prevCR *vmv1.VTSingle) error {
newPvc := newPVC(cr)
var prevPVC *corev1.PersistentVolumeClaim
- if prevCR != nil && prevCR.Spec.Storage != nil {
+ if prevCR != nil && !isStorageEmpty(prevCR.Spec.Storage) {
prevPVC = newPVC(prevCR)
}
owner := cr.AsOwner()
@@ -70,7 +74,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VTSingl
}
}
owner := cr.AsOwner()
- if cr.Spec.Storage != nil {
+ if !isStorageEmpty(cr.Spec.Storage) {
if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil {
return err
}
@@ -147,7 +151,7 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) {
args = append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes))
}
- storagePath := dataDataDir
+ storagePath := dataDir
if r.Spec.StorageDataPath != "" {
storagePath = r.Spec.StorageDataPath
}
@@ -182,7 +186,7 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) {
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal})
var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
- if r.Spec.Storage != nil {
+ if !isStorageEmpty(r.Spec.Storage) {
pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: r.PrefixedName(),
}
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index ed3066bb8..f1a1406ec 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -31,7 +31,7 @@ import (
var (
eventualDeploymentAppReadyTimeout = 60 * time.Second
- eventualStatefulsetAppReadyTimeout = 80 * time.Second
+ eventualStatefulsetAppReadyTimeout = 120 * time.Second
eventualDeletionTimeout = 45 * time.Second
eventualDeploymentPodTimeout = 25 * time.Second
eventualExpandingTimeout = 25 * time.Second
diff --git a/test/e2e/vmcluster_test.go b/test/e2e/vmcluster_test.go
index a36a6a2c0..4096b8379 100644
--- a/test/e2e/vmcluster_test.go
+++ b/test/e2e/vmcluster_test.go
@@ -11,6 +11,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
@@ -1394,6 +1395,60 @@ up{baz="bar"} 123
},
},
),
+ Entry("by changing vmstorage PVC metadata", "vmstorage-pvc-meta", false,
+ &vmv1beta1.VMCluster{
+ Spec: vmv1beta1.VMClusterSpec{
+ RequestsLoadBalancer: vmv1beta1.VMAuthLoadBalancer{Enabled: true},
+ RetentionPeriod: "1",
+ VMStorage: &vmv1beta1.VMStorage{
+ CommonApplicationDeploymentParams: vmv1beta1.CommonApplicationDeploymentParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ Storage: &vmv1beta1.StorageSpec{
+ VolumeClaimTemplate: vmv1beta1.EmbeddedPersistentVolumeClaim{
+ EmbeddedObjectMetadata: vmv1beta1.EmbeddedObjectMetadata{
+ Labels: map[string]string{
+ "label-name": "before",
+ },
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("10Gi"),
+ },
+ },
+ },
+ },
+ },
+ },
+ VMSelect: &vmv1beta1.VMSelect{
+ CommonApplicationDeploymentParams: vmv1beta1.CommonApplicationDeploymentParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ VMInsert: &vmv1beta1.VMInsert{
+ CommonApplicationDeploymentParams: vmv1beta1.CommonApplicationDeploymentParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ },
+ testStep{
+ modify: func(cr *vmv1beta1.VMCluster) {
+ cr.Spec.VMStorage.Storage.VolumeClaimTemplate.Labels["label-name"] = "after"
+ },
+ verify: func(cr *vmv1beta1.VMCluster) {
+ expectedLabels := map[string]string{"label-name": "after"}
+ Expect(cr.Spec.VMStorage.Storage.VolumeClaimTemplate.Labels).To(Equal(expectedLabels))
+ var pvc corev1.PersistentVolumeClaim
+ nsn := types.NamespacedName{
+ Namespace: namespace,
+ Name: fmt.Sprintf("%s-%s-0", cr.Spec.VMStorage.GetStorageVolumeName(), cr.PrefixedName(vmv1beta1.ClusterComponentStorage)),
+ }
+ Expect(k8sClient.Get(ctx, nsn, &pvc)).ToNot(HaveOccurred())
+ },
+ },
+ ),
Entry("by changing annotations for created objects", "manage-annotations", false,
&vmv1beta1.VMCluster{
Spec: vmv1beta1.VMClusterSpec{