diff --git a/docs/10_overview.md b/docs/10_overview.md index f6835e0..589a762 100644 --- a/docs/10_overview.md +++ b/docs/10_overview.md @@ -39,3 +39,4 @@ Below, you can find additional documentation for `mlp`: - [Generation Configuration](./30_generate.md) - [Hydration Logic](./40_hydrate.md) - [Interpolatation Template](./50_interpolate.md) +- [Filtered Jobs](./60_filtered_job.md) diff --git a/docs/60_filtered_job.md b/docs/60_filtered_job.md new file mode 100644 index 0000000..55dc6ad --- /dev/null +++ b/docs/60_filtered_job.md @@ -0,0 +1,86 @@ +# Filtered Jobs + +`mlp` supports the execution of Kubernetes `Job` resources before the main deploy phase. These filtered jobs +can be used to run database migrations, data transformations, or any other task that must complete successfully +before the rest of the resources are applied to the cluster. + +## How It Works + +Filtered jobs are identified by the annotation `mia-platform.eu/deploy` on a `Job` resource. When the +`--filtered-job-annotation` flag is provided to the `deploy` command, `mlp` will scan the resources and +separate all `Job` resources whose annotation value matches the one provided. + +When matching jobs are found, only those jobs are executed and the apply of the remaining resources is +skipped for that invocation (run `deploy` again without the flag to apply them); if the flag is provided +but no jobs match, nothing is applied at all. If the flag is not provided, any `Job` resource carrying the +`mia-platform.eu/deploy` annotation will be stripped from the resource list and not applied. + +## Annotating a Job + +To mark a `Job` as a filtered job, add the `mia-platform.eu/deploy` annotation with the desired value: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: db-migration + annotations: + mia-platform.eu/deploy: pre-deploy +spec: + template: + spec: + restartPolicy: Never + containers: + - name: migrate + image: my-app:latest + command: ["./migrate"] +``` + +Then pass the matching value to the deploy command: + +```sh +mlp deploy --filtered-job-annotation pre-deploy ... 
+``` + +## Optional Jobs + +A filtered job can be marked as optional by adding the annotation `mia-platform.eu/deploy-optional: "true"`. +Optional jobs are non-blocking: if they fail, the failure is logged as a warning and the deploy process +continues normally. Mandatory jobs (those without the optional annotation) will block and fail the deploy if +they cannot complete successfully. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: optional-cleanup + annotations: + mia-platform.eu/deploy: pre-deploy + mia-platform.eu/deploy-optional: "true" +spec: + template: + spec: + restartPolicy: Never + containers: + - name: cleanup + image: my-app:latest + command: ["./cleanup"] +``` + +## Retry and Timeout + +Each filtered job is retried automatically on failure. Before each retry the failed job is deleted from +the cluster so that a fresh instance can be created. The number of retries and the per-execution timeout +can be controlled via dedicated flags: + +| Flag | Default | Description | +|---|---|---| +| `--filtered-job-annotation` | _(empty)_ | Annotation value used to identify filtered jobs | +| `--filtered-job-max-retries` | `3` | Maximum number of retry attempts for a failed job | +| `--filtered-job-timeout` | `30s` | Timeout for a single job execution attempt | + +If a job exceeds the configured timeout it is considered failed and the retry logic applies as normal. + +## Dry Run + +When the `--dry-run` flag is active, no jobs are created on the cluster. Instead, `mlp` will print a message +for each job that would have been executed, allowing you to verify the configuration without side effects. 
diff --git a/pkg/cmd/deploy/deploy.go b/pkg/cmd/deploy/deploy.go index 67ee20d..3564539 100644 --- a/pkg/cmd/deploy/deploy.go +++ b/pkg/cmd/deploy/deploy.go @@ -82,6 +82,17 @@ const ( waitFlagDefaultValue = true waitFlagUsage = "if true, wait for resources to be current before marking them as successfully applied" + filteredJobAnnotationFlagName = "filtered-job-annotation" + filteredJobAnnotationFlagUsage = "the annotation value for mia-platform.eu/deploy to identify filtered jobs" + + filteredJobMaxRetriesFlagName = "filtered-job-max-retries" + filteredJobMaxRetriesDefaultValue = 3 + filteredJobMaxRetriesFlagUsage = "the maximum number of retries for a failed filtered job" + + filteredJobTimeoutFlagName = "filtered-job-timeout" + filteredJobTimeoutDefaultValue = 30 * time.Second + filteredJobTimeoutFlagUsage = "the timeout for a single filtered job execution" + stdinToken = "-" fieldManager = "mlp" inventoryName = "eu.mia-platform.mlp" @@ -97,25 +108,31 @@ var ( // Flags contains all the flags for the `deploy` command. They will be converted to Options // that contains all runtime options for the command. 
type Flags struct { - ConfigFlags *genericclioptions.ConfigFlags - inputPaths []string - deployType string - forceDeploy bool - ensureNamespace bool - timeout time.Duration - dryRun bool - wait bool + ConfigFlags *genericclioptions.ConfigFlags + inputPaths []string + deployType string + forceDeploy bool + ensureNamespace bool + timeout time.Duration + dryRun bool + wait bool + filteredJobAnnotation string + filteredJobMaxRetries int + filteredJobTimeout time.Duration } // Options have the data required to perform the deploy operation type Options struct { - inputPaths []string - deployType string - forceDeploy bool - ensureNamespace bool - timeout time.Duration - dryRun bool - wait bool + inputPaths []string + deployType string + forceDeploy bool + ensureNamespace bool + timeout time.Duration + dryRun bool + wait bool + filteredJobAnnotation string + filteredJobMaxRetries int + filteredJobTimeout time.Duration clientFactory util.ClientFactory clock clock.PassiveClock @@ -185,6 +202,9 @@ func (f *Flags) AddFlags(flags *pflag.FlagSet) { flags.DurationVar(&f.timeout, timeoutFlagName, timeoutDefaultValue, timeoutFlagUsage) flags.BoolVar(&f.dryRun, dryRunFlagName, dryRunDefaultValue, dryRunFlagUsage) flags.BoolVar(&f.wait, waitFlagName, waitFlagDefaultValue, waitFlagUsage) + flags.StringVar(&f.filteredJobAnnotation, filteredJobAnnotationFlagName, "", filteredJobAnnotationFlagUsage) + flags.IntVar(&f.filteredJobMaxRetries, filteredJobMaxRetriesFlagName, filteredJobMaxRetriesDefaultValue, filteredJobMaxRetriesFlagUsage) + flags.DurationVar(&f.filteredJobTimeout, filteredJobTimeoutFlagName, filteredJobTimeoutDefaultValue, filteredJobTimeoutFlagUsage) } // ToOptions transform the command flags in command runtime arguments @@ -194,12 +214,15 @@ func (f *Flags) ToOptions(reader io.Reader, writer io.Writer) (*Options, error) } return &Options{ - inputPaths: f.inputPaths, - deployType: f.deployType, - forceDeploy: f.forceDeploy, - ensureNamespace: f.ensureNamespace, - timeout: 
f.timeout, - wait: f.wait, + inputPaths: f.inputPaths, + deployType: f.deployType, + forceDeploy: f.forceDeploy, + ensureNamespace: f.ensureNamespace, + timeout: f.timeout, + wait: f.wait, + filteredJobAnnotation: f.filteredJobAnnotation, + filteredJobMaxRetries: f.filteredJobMaxRetries, + filteredJobTimeout: f.filteredJobTimeout, clientFactory: util.NewFactory(f.ConfigFlags), reader: reader, @@ -247,6 +270,16 @@ func (o *Options) Run(ctx context.Context) error { return nil } + var stop bool + resources, stop, err = o.runFilteredJobPhase(ctx, namespace, resources) + if err != nil { + return err + } + if stop { + logger.V(3).Info("filtered jobs executed, skipping remaining resources apply") + return nil + } + deployIdentifier := map[string]string{ "time": o.clock.Now().Format(time.RFC3339), } @@ -299,13 +332,45 @@ loop: return nil } + return errors.New(formatApplyErrors(errorsDuringApplying)) +} + +func formatApplyErrors(errs []error) string { builder := new(strings.Builder) - fmt.Fprintf(builder, "applying process has encountered %d error(s):\n", len(errorsDuringApplying)) - for _, err := range errorsDuringApplying { + fmt.Fprintf(builder, "applying process has encountered %d error(s):\n", len(errs)) + for _, err := range errs { fmt.Fprintf(builder, "\t- %s\n", err) } + return builder.String() +} + +// runFilteredJobPhase handles filtered job execution. When the --filtered-job-annotation +// flag is set, matching jobs are extracted and executed; if any are found the +// caller should stop the normal apply phase (stop=true). If the flag is set but no jobs +// match the filter, nothing is applied (stop=true with empty resources). When the flag is +// absent, annotated jobs are stripped from the resource list so they are never applied as +// regular resources. 
+func (o *Options) runFilteredJobPhase(ctx context.Context, namespace string, resources []*unstructured.Unstructured) ([]*unstructured.Unstructured, bool, error) { + if o.filteredJobAnnotation == "" { + return StripAnnotatedJobs(resources), false, nil + } + + filteredJobs, remaining := FilterAnnotatedJobs(resources, o.filteredJobAnnotation) + if len(filteredJobs) == 0 { + return nil, true, nil + } + + clientSet, err := o.clientFactory.KubernetesClientSet() + if err != nil { + return nil, false, err + } + + runner := NewFilteredJobRunner(clientSet, namespace, o.filteredJobMaxRetries, o.filteredJobTimeout, o.writer, o.dryRun) + if err := runner.Run(ctx, filteredJobs); err != nil { + return nil, false, err + } - return errors.New(builder.String()) + return remaining, true, nil } func deployTypeFlagCompletionfunc(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { diff --git a/pkg/cmd/deploy/deploy_test.go b/pkg/cmd/deploy/deploy_test.go index 318bacd..b33e17d 100644 --- a/pkg/cmd/deploy/deploy_test.go +++ b/pkg/cmd/deploy/deploy_test.go @@ -95,18 +95,24 @@ func TestOptions(t *testing.T) { configFlags := genericclioptions.NewConfigFlags(false) expectedOpts := &Options{ - inputPaths: []string{"input"}, - deployType: "smart_deploy", - reader: reader, - writer: buffer, - clientFactory: util.NewFactory(configFlags), - clock: clock.RealClock{}, - wait: false, + inputPaths: []string{"input"}, + deployType: "smart_deploy", + filteredJobTimeout: 5 * time.Minute, + filteredJobMaxRetries: 3, + filteredJobAnnotation: "pre-deploy", + reader: reader, + writer: buffer, + clientFactory: util.NewFactory(configFlags), + clock: clock.RealClock{}, + wait: false, } flag := &Flags{ - inputPaths: []string{"input"}, - deployType: "smart_deploy", + inputPaths: []string{"input"}, + deployType: "smart_deploy", + filteredJobTimeout: 5 * time.Minute, + filteredJobMaxRetries: 3, + filteredJobAnnotation: "pre-deploy", } _, err := flag.ToOptions(reader, buffer) 
assert.ErrorContains(t, err, "config flags are required") diff --git a/pkg/cmd/deploy/filteredjob.go b/pkg/cmd/deploy/filteredjob.go new file mode 100644 index 0000000..5ffa7d6 --- /dev/null +++ b/pkg/cmd/deploy/filteredjob.go @@ -0,0 +1,250 @@ +// Copyright Mia srl +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package deploy + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/go-logr/logr" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +const ( + filteredJobAnnotationKey = "mia-platform.eu/deploy" + deployOptionalAnnotation = "mia-platform.eu/deploy-optional" + jobKind = "Job" +) + +// FilteredJobRunner handles the creation, execution and monitoring of filtered jobs +// with configurable retry and timeout support. +type FilteredJobRunner struct { + clientSet kubernetes.Interface + namespace string + maxRetries int + timeout time.Duration + pollInterval time.Duration + writer io.Writer + dryRun bool +} + +// NewFilteredJobRunner creates a new FilteredJobRunner configured with the specified parameters +// for running filtered jobs against the target cluster. 
+func NewFilteredJobRunner(clientSet kubernetes.Interface, namespace string, maxRetries int, timeout time.Duration, writer io.Writer, dryRun bool) *FilteredJobRunner { + return &FilteredJobRunner{ + clientSet: clientSet, + namespace: namespace, + maxRetries: maxRetries, + timeout: timeout, + pollInterval: 1 * time.Second, + writer: writer, + dryRun: dryRun, + } +} + +// FilterAnnotatedJobs separates filtered jobs from the remaining resources based on the +// mia-platform.eu/deploy annotation matching the provided annotation value. +// It returns two slices: the matching filtered jobs and the remaining resources. +func FilterAnnotatedJobs(resources []*unstructured.Unstructured, annotationValue string) ([]*unstructured.Unstructured, []*unstructured.Unstructured) { + var filteredJobs []*unstructured.Unstructured + var remainingResources []*unstructured.Unstructured + + for _, res := range resources { + if res.GetKind() == jobKind { + annotations := res.GetAnnotations() + if val, ok := annotations[filteredJobAnnotationKey]; ok && val == annotationValue { + filteredJobs = append(filteredJobs, res) + continue + } + } + remainingResources = append(remainingResources, res) + } + + return filteredJobs, remainingResources +} + +// StripAnnotatedJobs removes all Job resources that carry the mia-platform.eu/deploy +// annotation, regardless of its value. This is used to exclude filtered jobs from the +// normal apply flow when the --filtered-job-annotation flag is not provided. +func StripAnnotatedJobs(resources []*unstructured.Unstructured) []*unstructured.Unstructured { + var remaining []*unstructured.Unstructured + + for _, res := range resources { + if res.GetKind() == jobKind { + if _, ok := res.GetAnnotations()[filteredJobAnnotationKey]; ok { + continue + } + } + remaining = append(remaining, res) + } + + return remaining +} + +// isOptionalFilteredJob reports whether the job carries the deploy-optional annotation set to "true". 
+func isOptionalFilteredJob(job *unstructured.Unstructured) bool { + return job.GetAnnotations()[deployOptionalAnnotation] == "true" +} + +// Run executes all filtered jobs with retry and timeout support. Each job is retried +// up to maxRetries times upon failure. Jobs annotated with mia-platform.eu/deploy-optional=true +// are treated as non-blocking: their failure is logged as a warning but never counted as an +// error. The first failure of a mandatory job aborts execution and returns an error; any +// jobs remaining after it are not run. +func (r *FilteredJobRunner) Run(ctx context.Context, jobs []*unstructured.Unstructured) error { + logger := logr.FromContextOrDiscard(ctx) + + if len(jobs) == 0 { + logger.V(3).Info("no filtered jobs to run") + return nil + } + + if r.dryRun { + for _, job := range jobs { + fmt.Fprintf(r.writer, "filtered job %q would be executed (dry-run)\n", job.GetName()) + } + return nil + } + + logger.V(3).Info("starting filtered jobs", "count", len(jobs)) + + for _, job := range jobs { + jobName := job.GetName() + optional := isOptionalFilteredJob(job) + + err := r.runJobWithRetries(ctx, job) + if err != nil { + if optional { + logger.V(3).Info("optional filtered job failed, continuing", "name", jobName, "error", err) + fmt.Fprintf(r.writer, "optional filtered job %q failed, continuing\n", jobName) + } else { + logger.V(3).Info("filtered job failed", "name", jobName, "error", err) + return fmt.Errorf("filtered job %q failed after %d attempts: %w", jobName, r.maxRetries, err) + } + } else { + fmt.Fprintf(r.writer, "filtered job %q completed successfully\n", jobName) + } + } + + logger.V(3).Info("filtered jobs completed") + return nil +} + +// runJobWithRetries attempts to run a single filtered job, retrying up to maxRetries times +// on failure. The failed job is deleted before each retry attempt. 
+func (r *FilteredJobRunner) runJobWithRetries(ctx context.Context, jobUnstr *unstructured.Unstructured) error { + logger := logr.FromContextOrDiscard(ctx) + var lastErr error + + for attempt := 0; attempt <= r.maxRetries; attempt++ { + if attempt > 0 { + logger.V(3).Info("retrying filtered job", "name", jobUnstr.GetName(), "attempt", attempt) + fmt.Fprintf(r.writer, "retrying filtered job %q (attempt %d/%d)\n", jobUnstr.GetName(), attempt, r.maxRetries) + } + + lastErr = r.createAndWaitForJob(ctx, jobUnstr) + if lastErr == nil { + return nil + } + + logger.V(5).Info("filtered job attempt failed", "name", jobUnstr.GetName(), "attempt", attempt, "error", lastErr) + + // Clean up the failed job before retrying + if cleanErr := r.deleteJob(ctx, jobUnstr.GetName()); cleanErr != nil { + logger.V(5).Info("failed to clean up job", "name", jobUnstr.GetName(), "error", cleanErr) + } + } + + return lastErr +} + +// createAndWaitForJob creates a single job in the cluster and waits for its completion. +// It converts the unstructured resource to a typed Job, sets the namespace, and monitors +// the job status until completion, failure, or timeout. 
+func (r *FilteredJobRunner) createAndWaitForJob(ctx context.Context, jobUnstr *unstructured.Unstructured) error { + logger := logr.FromContextOrDiscard(ctx) + + var job batchv1.Job + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(jobUnstr.Object, &job); err != nil { + return fmt.Errorf("failed to convert job %q: %w", jobUnstr.GetName(), err) + } + + job.Namespace = r.namespace + // Clear resource version and status for creation + job.ResourceVersion = "" + job.Status = batchv1.JobStatus{} + + logger.V(5).Info("creating filtered job", "name", job.Name, "namespace", r.namespace) + + if _, err := r.clientSet.BatchV1().Jobs(r.namespace).Create(ctx, &job, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("failed to create job %q: %w", job.Name, err) + } + + return r.waitForJobCompletion(ctx, job.Name) +} + +// waitForJobCompletion polls the job status at regular intervals until the job completes, +// fails, or the configured timeout expires. +func (r *FilteredJobRunner) waitForJobCompletion(ctx context.Context, name string) error { + logger := logr.FromContextOrDiscard(ctx) + + timeoutCtx, cancel := context.WithTimeout(ctx, r.timeout) + defer cancel() + + ticker := time.NewTicker(r.pollInterval) + defer ticker.Stop() + + for { + select { + case <-timeoutCtx.Done(): + return fmt.Errorf("job %q timed out after %s", name, r.timeout) + case <-ticker.C: + if timeoutCtx.Err() != nil { + return fmt.Errorf("job %q timed out after %s", name, r.timeout) + } + job, err := r.clientSet.BatchV1().Jobs(r.namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get job %q status: %w", name, err) + } + + logger.V(10).Info("polling job status", "name", name, "active", job.Status.Active, "succeeded", job.Status.Succeeded, "failed", job.Status.Failed) + + for _, condition := range job.Status.Conditions { + if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue { + return nil + } + if 
condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue { + return fmt.Errorf("job %q failed: %s", name, condition.Message) + } + } + } + } +} + +// deleteJob removes a job and its associated pods from the cluster using background propagation +func (r *FilteredJobRunner) deleteJob(ctx context.Context, name string) error { + propagation := metav1.DeletePropagationBackground + return r.clientSet.BatchV1().Jobs(r.namespace).Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &propagation, + }) +} diff --git a/pkg/cmd/deploy/filteredjob_test.go b/pkg/cmd/deploy/filteredjob_test.go new file mode 100644 index 0000000..c8c371c --- /dev/null +++ b/pkg/cmd/deploy/filteredjob_test.go @@ -0,0 +1,798 @@ +// Copyright Mia srl +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package deploy + +import ( + "errors" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kubefake "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +// newTestJob creates an unstructured Job resource for testing with the given name and optional +// filtered job annotation value. If annotationValue is empty, no annotation is added. 
+func newTestJob(name, annotationValue string) *unstructured.Unstructured { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []interface{}{ + map[string]interface{}{ + "name": "test", + "image": "busybox", + }, + }, + "restartPolicy": "Never", + }, + }, + }, + }, + } + + if annotationValue != "" { + obj.SetAnnotations(map[string]string{ + filteredJobAnnotationKey: annotationValue, + }) + } + + return obj +} + +// newOptionalTestJob creates a filtered Job with the deploy-optional annotation set to "true". +func newOptionalTestJob(name string) *unstructured.Unstructured { + job := newTestJob(name, "pre-deploy") + annotations := job.GetAnnotations() + annotations[deployOptionalAnnotation] = "true" + job.SetAnnotations(annotations) + return job +} + +func TestFilterAnnotatedJobs(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + resources []*unstructured.Unstructured + annotationValue string + expectedJobs int + expectedRemaining int + }{ + "no resources": { + resources: nil, + annotationValue: "pre-deploy", + expectedJobs: 0, + expectedRemaining: 0, + }, + "no matching jobs": { + resources: []*unstructured.Unstructured{ + newTestJob("job1", ""), + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{"name": "cm1"}, + }, + }, + }, + annotationValue: "pre-deploy", + expectedJobs: 0, + expectedRemaining: 2, + }, + "matching jobs separated from remaining": { + resources: []*unstructured.Unstructured{ + newTestJob("pre-job", "pre-deploy"), + newTestJob("normal-job", ""), + { + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{"name": "deploy1"}, + }, + }, + }, + annotationValue: 
"pre-deploy", + expectedJobs: 1, + expectedRemaining: 2, + }, + "non-job resource with annotation not filtered": { + resources: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "cm-with-annotation", + "annotations": map[string]interface{}{filteredJobAnnotationKey: "pre-deploy"}, + }, + }, + }, + }, + annotationValue: "pre-deploy", + expectedJobs: 0, + expectedRemaining: 1, + }, + "job with different annotation value not filtered": { + resources: []*unstructured.Unstructured{ + newTestJob("job-once", "once"), + }, + annotationValue: "pre-deploy", + expectedJobs: 0, + expectedRemaining: 1, + }, + "multiple matching jobs": { + resources: []*unstructured.Unstructured{ + newTestJob("pre-job-1", "pre-deploy"), + newTestJob("pre-job-2", "pre-deploy"), + newTestJob("normal-job", ""), + }, + annotationValue: "pre-deploy", + expectedJobs: 2, + expectedRemaining: 1, + }, + "custom annotation value": { + resources: []*unstructured.Unstructured{ + newTestJob("custom-job", "custom-value"), + newTestJob("pre-job", "pre-deploy"), + }, + annotationValue: "custom-value", + expectedJobs: 1, + expectedRemaining: 1, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + jobs, remaining := FilterAnnotatedJobs(test.resources, test.annotationValue) + assert.Len(t, jobs, test.expectedJobs) + assert.Len(t, remaining, test.expectedRemaining) + }) + } +} + +func TestStripAnnotatedJobs(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + resources []*unstructured.Unstructured + expectedRemaining int + }{ + "no resources": { + resources: nil, + expectedRemaining: 0, + }, + "no annotated jobs": { + resources: []*unstructured.Unstructured{ + newTestJob("normal-job", ""), + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{"name": "cm1"}, + }, + }, + }, + 
expectedRemaining: 2, + }, + "strips jobs with any annotation value": { + resources: []*unstructured.Unstructured{ + newTestJob("pre-job", "pre-deploy"), + newTestJob("custom-job", "custom-value"), + newTestJob("normal-job", ""), + { + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{"name": "deploy1"}, + }, + }, + }, + expectedRemaining: 2, + }, + "non-job resource with annotation is kept": { + resources: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "cm-annotated", + "annotations": map[string]interface{}{filteredJobAnnotationKey: "pre-deploy"}, + }, + }, + }, + }, + expectedRemaining: 1, + }, + "all annotated jobs stripped": { + resources: []*unstructured.Unstructured{ + newTestJob("job-1", "pre-deploy"), + newTestJob("job-2", "other"), + }, + expectedRemaining: 0, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + remaining := StripAnnotatedJobs(test.resources) + assert.Len(t, remaining, test.expectedRemaining) + }) + } +} + +func TestFilteredJobRunnerRun(t *testing.T) { + t.Parallel() + + namespace := "test-ns" + + tests := map[string]struct { + jobs []*unstructured.Unstructured + setupReactor func(*kubefake.Clientset) + dryRun bool + maxRetries int + expectedError string + expectedOut string + }{ + "no jobs returns nil": { + jobs: nil, + maxRetries: 3, + }, + "dry run skips execution": { + jobs: []*unstructured.Unstructured{newTestJob("pre-job", "pre-deploy")}, + dryRun: true, + maxRetries: 3, + expectedOut: "would be executed (dry-run)", + }, + "successful job completion": { + jobs: []*unstructured.Unstructured{newTestJob("pre-job", "pre-deploy")}, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: action.(k8stesting.GetAction).GetName(), + Namespace: namespace, + }, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + }, + }, nil + }) + }, + maxRetries: 3, + expectedOut: "completed successfully", + }, + "all jobs fail returns error": { + jobs: []*unstructured.Unstructured{newTestJob("fail-job", "pre-deploy")}, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.(k8stesting.GetAction).GetName(), + Namespace: namespace, + }, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + Message: "BackoffLimitExceeded", + }, + }, + }, + }, nil + }) + }, + maxRetries: 1, + expectedError: "filtered job \"fail-job\" failed after 1 attempts:", + }, + "partial success stops at first failure": { + jobs: []*unstructured.Unstructured{ + newTestJob("success-job", "pre-deploy"), + newTestJob("fail-job", "pre-deploy"), + }, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + getAction := action.(k8stesting.GetAction) + name := getAction.GetName() + if name == "success-job" { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + } + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "failed"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedOut: 
"completed successfully", + expectedError: "filtered job \"fail-job\" failed after 0 attempts:", + }, + "multiple jobs stops at first failure": { + jobs: []*unstructured.Unstructured{ + newTestJob("fail-job-1", "pre-deploy"), + newTestJob("fail-job-2", "pre-deploy"), + }, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.(k8stesting.GetAction).GetName(), + Namespace: namespace, + }, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "error"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedError: "filtered job \"fail-job-1\" failed after 0 attempts:", + }, + "optional job failure does not block deploy": { + jobs: []*unstructured.Unstructured{newOptionalTestJob("optional-job")}, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.(k8stesting.GetAction).GetName(), + Namespace: namespace, + }, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "migration failed"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedOut: "optional filtered job", + }, + "optional job failure with mandatory success": { + jobs: []*unstructured.Unstructured{ + newTestJob("mandatory-job", "pre-deploy"), + newOptionalTestJob("optional-job"), + }, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + name := action.(k8stesting.GetAction).GetName() + if name == "mandatory-job" { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: 
batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + } + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "optional failed"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedOut: "completed successfully", + }, + "mandatory job fails while optional succeeds": { + jobs: []*unstructured.Unstructured{ + newTestJob("mandatory-job", "pre-deploy"), + newOptionalTestJob("optional-job"), + }, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + name := action.(k8stesting.GetAction).GetName() + if name == "optional-job" { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + } + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "mandatory failed"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedError: "filtered job \"mandatory-job\" failed after 0 attempts:", + }, + "all optional jobs fail": { + jobs: []*unstructured.Unstructured{ + newOptionalTestJob("optional-job-1"), + newOptionalTestJob("optional-job-2"), + }, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.(k8stesting.GetAction).GetName(), + Namespace: namespace, + }, + Status: batchv1.JobStatus{ + Conditions: 
[]batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "failed"}, + }, + }, + }, nil + }) + }, + maxRetries: 0, + expectedOut: "optional filtered job", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + fakeClientset := kubefake.NewSimpleClientset() + if test.setupReactor != nil { + test.setupReactor(fakeClientset) + } + + writer := new(strings.Builder) + runner := NewFilteredJobRunner(fakeClientset, namespace, test.maxRetries, 5*time.Second, writer, test.dryRun) + runner.pollInterval = 10 * time.Millisecond + + err := runner.Run(t.Context(), test.jobs) + + switch len(test.expectedError) { + case 0: + require.NoError(t, err) + default: + assert.ErrorContains(t, err, test.expectedError) + } + + if test.expectedOut != "" { + assert.Contains(t, writer.String(), test.expectedOut) + } + + t.Log(writer.String()) + }) + } +} + +func TestRunJobWithRetries(t *testing.T) { + t.Parallel() + + namespace := "test-ns" + + tests := map[string]struct { + maxRetries int + setupReactor func(*kubefake.Clientset) + expectedError string + }{ + "success on first attempt": { + maxRetries: 3, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + }) + }, + }, + "success after retry": { + maxRetries: 3, + setupReactor: func(cs *kubefake.Clientset) { + callCount := 0 + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + callCount++ + if callCount <= 1 { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: 
batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "failed"}, + }, + }, + }, nil + } + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + }) + }, + }, + "all retries exhausted": { + maxRetries: 2, + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "BackoffLimitExceeded"}, + }, + }, + }, nil + }) + }, + expectedError: `job "test-job" failed: BackoffLimitExceeded`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + fakeClientset := kubefake.NewSimpleClientset() + test.setupReactor(fakeClientset) + + writer := new(strings.Builder) + runner := NewFilteredJobRunner(fakeClientset, namespace, test.maxRetries, 5*time.Second, writer, false) + runner.pollInterval = 10 * time.Millisecond + + job := newTestJob("test-job", "pre-deploy") + err := runner.runJobWithRetries(t.Context(), job) + + switch len(test.expectedError) { + case 0: + require.NoError(t, err) + default: + assert.ErrorContains(t, err, test.expectedError) + } + + t.Log(writer.String()) + }) + } +} + +func TestWaitForJobCompletion(t *testing.T) { + t.Parallel() + + namespace := "test-ns" + + tests := map[string]struct { + setupReactor func(*kubefake.Clientset) + timeout time.Duration + expectedError string + }{ + "job completes successfully": { + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: 
metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + }) + }, + timeout: 5 * time.Second, + }, + "job fails": { + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Message: "container crashed"}, + }, + }, + }, nil + }) + }, + timeout: 5 * time.Second, + expectedError: `job "test-job" failed: container crashed`, + }, + "job times out": { + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + }, nil + }) + }, + timeout: 100 * time.Millisecond, + expectedError: `job "test-job" timed out`, + }, + "get job returns error": { + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("connection refused") + }) + }, + timeout: 5 * time.Second, + expectedError: `failed to get job "test-job" status`, + }, + // Regression test: when ticker.C and timeoutCtx.Done() fire simultaneously, Go's + // non-deterministic select may pick the ticker case. The reactor used here sleeps + // well past the timeout to guarantee the context is already cancelled by the time + // Get is called. Without the fix the error would be "failed to get job status: + // context deadline exceeded" rather than the canonical "timed out" message. 
+ "timeout error is reported correctly when ticker races with timeout": { + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + // sleep long enough to ensure the timeout fires before or during Get + time.Sleep(200 * time.Millisecond) + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + }, nil + }) + }, + timeout: 10 * time.Millisecond, + expectedError: `job "test-job" timed out`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + fakeClientset := kubefake.NewSimpleClientset() + test.setupReactor(fakeClientset) + + writer := new(strings.Builder) + runner := NewFilteredJobRunner(fakeClientset, namespace, 3, test.timeout, writer, false) + runner.pollInterval = 10 * time.Millisecond + + err := runner.waitForJobCompletion(t.Context(), "test-job") + + switch len(test.expectedError) { + case 0: + require.NoError(t, err) + default: + assert.ErrorContains(t, err, test.expectedError) + } + }) + } +} + +func TestCreateAndWaitForJob(t *testing.T) { + t.Parallel() + + namespace := "test-ns" + + tests := map[string]struct { + job *unstructured.Unstructured + setupReactor func(*kubefake.Clientset) + expectedError string + }{ + "successful creation and completion": { + job: newTestJob("test-job", "pre-deploy"), + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("get", "jobs", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job", Namespace: namespace}, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{ + {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}, + }, + }, + }, nil + }) + }, + }, + "creation failure": { + job: newTestJob("test-job", "pre-deploy"), + setupReactor: func(cs *kubefake.Clientset) { + cs.PrependReactor("create", "jobs", func(action 
k8stesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("forbidden") + }) + }, + expectedError: `failed to create job "test-job"`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + fakeClientset := kubefake.NewSimpleClientset() + test.setupReactor(fakeClientset) + + writer := new(strings.Builder) + runner := NewFilteredJobRunner(fakeClientset, namespace, 3, 5*time.Second, writer, false) + runner.pollInterval = 10 * time.Millisecond + + err := runner.createAndWaitForJob(t.Context(), test.job) + + switch len(test.expectedError) { + case 0: + require.NoError(t, err) + default: + assert.ErrorContains(t, err, test.expectedError) + } + }) + } +} + +func TestDeleteJob(t *testing.T) { + t.Parallel() + + namespace := "test-ns" + fakeClientset := kubefake.NewSimpleClientset(&batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: namespace, + }, + }) + + writer := new(strings.Builder) + runner := NewFilteredJobRunner(fakeClientset, namespace, 3, 30*time.Second, writer, false) + + err := runner.deleteJob(t.Context(), "test-job") + require.NoError(t, err) + + // Verify job is deleted + _, err = fakeClientset.BatchV1().Jobs(namespace).Get(t.Context(), "test-job", metav1.GetOptions{}) + assert.Error(t, err) +} diff --git a/pkg/cmd/deploy/testdata/filtered-job/configmap.yaml b/pkg/cmd/deploy/testdata/filtered-job/configmap.yaml new file mode 100644 index 0000000..f06e98f --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: example +data: + key: value diff --git a/pkg/cmd/deploy/testdata/filtered-job/deployment.yaml b/pkg/cmd/deploy/testdata/filtered-job/deployment.yaml new file mode 100644 index 0000000..a71a886 --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/deployment.yaml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example +spec: + selector: + matchLabels: 
+ app: example + template: + metadata: + annotations: + mia-platform.eu/dependencies-checksum: predefined-value + labels: + app: example + spec: + containers: + - name: example + image: nginx:latest + resources: + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: ENV + valueFrom: + configMapKeyRef: + key: key + name: example + volumes: + - name: example + configMap: + name: example diff --git a/pkg/cmd/deploy/testdata/filtered-job/filtered-job-failed.yaml b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-failed.yaml new file mode 100644 index 0000000..e685000 --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-failed.yaml @@ -0,0 +1,20 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-migration-fail + annotations: + mia-platform.eu/deploy: pre-deploy +spec: + # backoffLimit is also enforced to 0 by mlp at apply time + backoffLimit: 0 + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - -c + - echo "migration failed!" && exit 1 + restartPolicy: Never diff --git a/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional-failed.yaml b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional-failed.yaml new file mode 100644 index 0000000..830dc50 --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional-failed.yaml @@ -0,0 +1,21 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-migration-fail + annotations: + mia-platform.eu/deploy: pre-deploy + mia-platform.eu/deploy-optional: 'true' +spec: + # backoffLimit is also enforced to 0 by mlp at apply time + backoffLimit: 0 + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - -c + - echo "migration failed!" 
&& exit 1 + restartPolicy: Never diff --git a/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional.yaml b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional.yaml new file mode 100644 index 0000000..41d462a --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-optional.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-optional + annotations: + mia-platform.eu/deploy: pre-deploy + mia-platform.eu/deploy-optional: 'true' +spec: + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - '-c' + - echo "running migration" && exit $(( RANDOM % 2 )) + restartPolicy: Never diff --git a/pkg/cmd/deploy/testdata/filtered-job/filtered-job-sleep.yaml b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-sleep.yaml new file mode 100644 index 0000000..c88f33e --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/filtered-job-sleep.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-sleep + annotations: + mia-platform.eu/deploy: pre-deploy +spec: + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - -c + - echo 'Inizio...' && sleep 2 && echo 'Fine attesa!' 
+ restartPolicy: Never diff --git a/pkg/cmd/deploy/testdata/filtered-job/filtered-job.yaml b/pkg/cmd/deploy/testdata/filtered-job/filtered-job.yaml new file mode 100644 index 0000000..6a6b008 --- /dev/null +++ b/pkg/cmd/deploy/testdata/filtered-job/filtered-job.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-migration + annotations: + mia-platform.eu/deploy: pre-deploy +spec: + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - -c + - echo "running migration" + restartPolicy: Never diff --git a/pkg/extensions/testdata/filter/filtered-job.yaml b/pkg/extensions/testdata/filter/filtered-job.yaml new file mode 100644 index 0000000..6b2601f --- /dev/null +++ b/pkg/extensions/testdata/filter/filtered-job.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: filtered-migration + annotations: + mia-platform.eu/deploy: pre-deploy +spec: + template: + metadata: {} + spec: + containers: + - name: migration + image: busybox + args: + - /bin/sh + - -c + - echo "running failing migration" && exit 1 + restartPolicy: Never diff --git a/pkg/extensions/testdata/filter/regular-job.yaml b/pkg/extensions/testdata/filter/regular-job.yaml new file mode 100644 index 0000000..cf95f55 --- /dev/null +++ b/pkg/extensions/testdata/filter/regular-job.yaml @@ -0,0 +1,16 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: regular-job +spec: + template: + metadata: {} + spec: + containers: + - name: worker + image: busybox + args: + - /bin/sh + - -c + - echo "working" + restartPolicy: Never