diff --git a/Makefile b/Makefile index 0126d00a..9d6eaae1 100644 --- a/Makefile +++ b/Makefile @@ -175,6 +175,7 @@ build-windows: save-version gen-version $(BUILD_UI) ## Build for Windows # Target: build-ui # Description: Builds the UI for the dashboard. # Usage: make build-ui +# TODO: use env var to define the build mode .PHONY: build-ui build-ui: gen-version ## Build UI for the dashboard @echo "🧀 Building UI for the dashboard ..." @@ -292,3 +293,33 @@ check: ## Check the lint, test, and build @$(MAKE) build || (echo "❌ Build check failed!" && exit 1) @echo "✅ Build check passed!" @echo "🎉 All checks passed successfully!" + +# controller-gen path +CONTROLLER_GEN = ${GOPATH}/bin/controller-gen +# controller-gen version +CONTROLLER_GEN_VERSION = v0.17.1 + +# Target: install-controller-gen +# Description: Install controller_gen. +# Usage: +# make install-controller-gen +.PHONY: install-controller-gen +install-controller-gen: + $(GO) install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION) + +# Target: generate-crds +# Description: Generate CRDs into a special dir. +# Usage: +# make generate-crds +.PHONY: generate-crds +generate-crds: + @# generate rbac, webhook and crds + $(CONTROLLER_GEN) crd paths="./pkg/kubernetes/apis/cluster/v1beta1/..." output:crd:artifacts:config=config/crds/ + $(CONTROLLER_GEN) crd paths="./pkg/kubernetes/apis/search/v1beta1/..." output:crd:artifacts:config=config/crds/ + +# Target: manifests +# Description: Install controller_gen and generate CRDs into a special dir. 
+# Usage: +# make manifests +.PHONY: manifests +manifests: install-controller-gen generate-crds diff --git a/api/openapispec/docs.go b/api/openapispec/docs.go index d588053b..b804a128 100644 --- a/api/openapispec/docs.go +++ b/api/openapispec/docs.go @@ -545,6 +545,20 @@ var doc = `{ "name": "description", "in": "formData", "required": true + }, + { + "type": "string", + "description": "cluster mode", + "name": "clusterMode", + "in": "formData", + "required": true + }, + { + "type": "integer", + "description": "cluster scale level", + "name": "clusterLevel", + "in": "formData", + "required": true } ], "responses": { @@ -920,6 +934,69 @@ var doc = `{ } } }, + "/rest-api/v1/cluster/{clusterName}/agentYml": { + "get": { + "description": "Obtain the agent yaml in secret for cluster.", + "consumes": [ + "text/plain", + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cluster" + ], + "summary": "Get agent yaml", + "parameters": [ + { + "type": "string", + "description": "The name of the cluster", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Verification passed server version", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "string" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "string" + } + }, + "429": { + "description": "Too Many Requests", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, "/rest-api/v1/clusters": { "get": { "description": "This endpoint lists all cluster resources.", @@ -2265,6 +2342,14 @@ var doc = `{ "cluster.ClusterPayload": { "type": "object", "properties": { + "clusterLevel": { + "description": "clusterLevel is the scale level of cluster to be created", + "type": "integer" + 
}, + "clusterMode": { + "description": "ClusterMode is the mode of cluster to be created", + "type": "string" + }, "description": { "description": "ClusterDescription is the description of cluster to be created", "type": "string" diff --git a/api/openapispec/swagger.json b/api/openapispec/swagger.json index d5092eea..78e30607 100644 --- a/api/openapispec/swagger.json +++ b/api/openapispec/swagger.json @@ -529,6 +529,20 @@ "name": "description", "in": "formData", "required": true + }, + { + "type": "string", + "description": "cluster mode", + "name": "clusterMode", + "in": "formData", + "required": true + }, + { + "type": "integer", + "description": "cluster scale level", + "name": "clusterLevel", + "in": "formData", + "required": true } ], "responses": { @@ -904,6 +918,69 @@ } } }, + "/rest-api/v1/cluster/{clusterName}/agentYml": { + "get": { + "description": "Obtain the agent yaml in secret for cluster.", + "consumes": [ + "text/plain", + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cluster" + ], + "summary": "Get agent yaml", + "parameters": [ + { + "type": "string", + "description": "The name of the cluster", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Verification passed server version", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "string" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "string" + } + }, + "429": { + "description": "Too Many Requests", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, "/rest-api/v1/clusters": { "get": { "description": "This endpoint lists all cluster resources.", @@ -2249,6 +2326,14 @@ "cluster.ClusterPayload": { "type": "object", "properties": { + 
"clusterLevel": { + "description": "clusterLevel is the scale level of cluster to be created", + "type": "integer" + }, + "clusterMode": { + "description": "ClusterMode is the mode of cluster to be created", + "type": "string" + }, "description": { "description": "ClusterDescription is the description of cluster to be created", "type": "string" diff --git a/api/openapispec/swagger.yaml b/api/openapispec/swagger.yaml index 46f4b1eb..9ce91cc6 100644 --- a/api/openapispec/swagger.yaml +++ b/api/openapispec/swagger.yaml @@ -86,6 +86,12 @@ definitions: type: object cluster.ClusterPayload: properties: + clusterLevel: + description: clusterLevel is the scale level of cluster to be created + type: integer + clusterMode: + description: ClusterMode is the mode of cluster to be created + type: string description: description: ClusterDescription is the description of cluster to be created type: string @@ -774,6 +780,48 @@ paths: summary: Update updates the cluster metadata by name. tags: - cluster + /rest-api/v1/cluster/{clusterName}/agentYml: + get: + consumes: + - text/plain + - application/json + description: Obtain the agent yaml in secret for cluster. 
+ parameters: + - description: The name of the cluster + in: path + name: clusterName + required: true + type: string + produces: + - application/json + responses: + "200": + description: Verification passed server version + schema: + type: string + "400": + description: Bad Request + schema: + type: string + "401": + description: Unauthorized + schema: + type: string + "404": + description: Not Found + schema: + type: string + "429": + description: Too Many Requests + schema: + type: string + "500": + description: Internal Server Error + schema: + type: string + summary: Get agent yaml + tags: + - cluster /rest-api/v1/cluster/config/file: post: consumes: @@ -800,6 +848,16 @@ paths: name: description required: true type: string + - description: cluster mode + in: formData + name: clusterMode + required: true + type: string + - description: cluster scale level + in: formData + name: clusterLevel + required: true + type: integer produces: - text/plain responses: diff --git a/cmd/karpor/app/agent.go b/cmd/karpor/app/agent.go new file mode 100644 index 00000000..b1a1cd03 --- /dev/null +++ b/cmd/karpor/app/agent.go @@ -0,0 +1,127 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package app + +import ( + "context" + + esclient "github.com/elastic/go-elasticsearch/v8" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" + "github.com/KusionStack/karpor/pkg/syncer" + "github.com/KusionStack/karpor/pkg/syncer/utils" +) + +type agentOptions struct { + syncerOptions + ClusterName string + ClusterMode string +} + +func NewAgentOptions() *agentOptions { + return &agentOptions{ + syncerOptions: *NewSyncerOptions(), + } +} + +func (o *agentOptions) AddFlags(fs *pflag.FlagSet) { + o.syncerOptions.AddFlags(fs) + fs.StringVar(&o.ClusterName, "cluster-name", "", "The cluster name in hub cluster.") + fs.StringVar(&o.ClusterMode, "cluster-mode", "pull", "The cluster mode.") +} + +func NewAgentCommand(ctx context.Context) *cobra.Command { + options := NewAgentOptions() + cmd := &cobra.Command{ + Use: "agent", + Short: "start a resource syncer agent which deployed in user cluster", + RunE: func(cmd *cobra.Command, args []string) error { + // use the same logical as the Non-HA syncer in controller cluster + return runAgent(ctx, options) + }, + } + options.AddFlags(cmd.Flags()) + return cmd +} + +func runAgent(ctx context.Context, options *agentOptions) error { + ctrl.SetLogger(klogr.New()) + log := ctrl.Log.WithName("setup") + + if options.ClusterMode == clusterv1beta1.PushClusterMode { + // apply crds + dynamicClient, err := dynamic.NewForConfig(ctrl.GetConfigOrDie()) + if err != nil { + return errors.Wrapf(err, "failed to build dynamic client for ageng") + } + err = utils.ApplyCrds(ctx, dynamicClient) + if err != nil { + return err + } + } + + mgr, err := 
ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme.Scheme, + MetricsBindAddress: options.MetricsAddr, + HealthProbeBindAddress: options.ProbeAddr, + }) + if err != nil { + log.Error(err, "unable to start manager") + return err + } + + // TODO: add startup parameters to change the type of storage + //nolint:contextcheck + es, err := elasticsearch.NewStorage(esclient.Config{ + Addresses: options.ElasticSearchAddresses, + }) + if err != nil { + log.Error(err, "unable to init elasticsearch client") + return err + } + + //nolint:contextcheck + if err = syncer.NewAgentReconciler(es, options.ClusterName).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create resource syncer") + return err + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + log.Error(err, "unable to set up health check") + return err + } + + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + log.Error(err, "unable to set up ready check") + return err + } + + log.Info("starting manager") + if err := mgr.Start(ctx); err != nil { + log.Error(err, "problem running manager") + return err + } + + return nil +} diff --git a/cmd/karpor/app/options/core_options.go b/cmd/karpor/app/options/core_options.go index 228fc4f6..7b2c6992 100644 --- a/cmd/karpor/app/options/core_options.go +++ b/cmd/karpor/app/options/core_options.go @@ -15,15 +15,18 @@ package options import ( - "github.com/KusionStack/karpor/pkg/kubernetes/registry" "github.com/spf13/pflag" + + "github.com/KusionStack/karpor/pkg/kubernetes/registry" ) type CoreOptions struct { - EnableRBAC bool - ReadOnlyMode bool - GithubBadge bool - Version bool + EnableRBAC bool + ReadOnlyMode bool + GithubBadge bool + Version bool + HighAvailability bool + EnableAI bool } func NewCoreOptions() *CoreOptions { @@ -50,4 +53,6 @@ func (o *CoreOptions) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&o.ReadOnlyMode, "read-only-mode", false, "turn on the read only mode") fs.BoolVar(&o.GithubBadge, 
"github-badge", false, "whether to display the github badge") fs.BoolVarP(&o.Version, "version", "V", o.Version, "Print version and exit") + fs.BoolVar(&o.HighAvailability, "high-availability", false, "whether to use high-availability feature.") + fs.BoolVar(&o.EnableAI, "enable-ai", false, "whether to enable llm.") } diff --git a/cmd/karpor/app/options/recommended.go b/cmd/karpor/app/options/recommended.go index 43b2ffc2..5f29e2c1 100644 --- a/cmd/karpor/app/options/recommended.go +++ b/cmd/karpor/app/options/recommended.go @@ -18,10 +18,6 @@ import ( "fmt" "time" - karporopenapi "github.com/KusionStack/karpor/pkg/kubernetes/generated/openapi" - k8sopenapi "github.com/KusionStack/karpor/pkg/kubernetes/openapi" - "github.com/KusionStack/karpor/pkg/kubernetes/registry" - "github.com/KusionStack/karpor/pkg/kubernetes/scheme" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -41,6 +37,11 @@ import ( "k8s.io/kube-openapi/pkg/common" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" "k8s.io/kubernetes/pkg/serviceaccount" + + karporopenapi "github.com/KusionStack/karpor/pkg/kubernetes/generated/openapi" + k8sopenapi "github.com/KusionStack/karpor/pkg/kubernetes/openapi" + "github.com/KusionStack/karpor/pkg/kubernetes/registry" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" ) // RecommendedOptions contains the recommended options for running an API server. 
diff --git a/cmd/karpor/app/server.go b/cmd/karpor/app/server.go index 2f513c32..60c5a5c6 100644 --- a/cmd/karpor/app/server.go +++ b/cmd/karpor/app/server.go @@ -26,12 +26,6 @@ import ( "os" "time" - "github.com/KusionStack/karpor/cmd/karpor/app/options" - "github.com/KusionStack/karpor/pkg/kubernetes/registry" - "github.com/KusionStack/karpor/pkg/kubernetes/scheme" - "github.com/KusionStack/karpor/pkg/server" - proxyutil "github.com/KusionStack/karpor/pkg/util/proxy" - "github.com/KusionStack/karpor/pkg/version" "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime/schema" @@ -45,6 +39,13 @@ import ( authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/serviceaccount" netutils "k8s.io/utils/net" + + "github.com/KusionStack/karpor/cmd/karpor/app/options" + "github.com/KusionStack/karpor/pkg/kubernetes/registry" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" + "github.com/KusionStack/karpor/pkg/server" + proxyutil "github.com/KusionStack/karpor/pkg/util/proxy" + "github.com/KusionStack/karpor/pkg/version" ) const ( diff --git a/cmd/karpor/app/syncer.go b/cmd/karpor/app/syncer.go index 90b07dd4..72ddb52e 100644 --- a/cmd/karpor/app/syncer.go +++ b/cmd/karpor/app/syncer.go @@ -16,22 +16,41 @@ package app import ( "context" + "crypto" + "crypto/x509" + + "k8s.io/klog/v2" "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" "github.com/KusionStack/karpor/pkg/kubernetes/scheme" "github.com/KusionStack/karpor/pkg/syncer" + "github.com/KusionStack/karpor/pkg/util/certgenerator" + esclient "github.com/elastic/go-elasticsearch/v8" "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" ) +const ( + defaultCertFile = "/etc/karpor/ca/ca.crt" + defaultKeyFile = "/etc/karpor/ca/ca.key" +) + type syncerOptions struct { + HighAvailability bool + OnlyPushMode bool + MetricsAddr string ProbeAddr 
string ElasticSearchAddresses []string + + ExternalEndpoint string + AgentImageTag string + + CaCertFile string + CaKeyFile string } func NewSyncerOptions() *syncerOptions { @@ -39,9 +58,17 @@ func NewSyncerOptions() *syncerOptions { } func (o *syncerOptions) AddFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.HighAvailability, "high-availability", false, "Whether to use high-availability feature.") + fs.BoolVar(&o.OnlyPushMode, "only-push-mode", false, "Only push mode in high availability feature.") + fs.StringVar(&o.MetricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") fs.StringVar(&o.ProbeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") fs.StringSliceVar(&o.ElasticSearchAddresses, "elastic-search-addresses", nil, "The elastic search address.") + fs.StringVar(&o.ExternalEndpoint, "external-addresses", "", "The external address that expose to user cluster in pull mode.") + fs.StringVar(&o.AgentImageTag, "agent-image-tag", "v0.0.0", "The agent image tag.") + + fs.StringVar(&o.CaCertFile, "ca-cert-file", defaultCertFile, "Root CA certificate file for karpor server.") + fs.StringVar(&o.CaKeyFile, "ca-key-file", defaultKeyFile, "Root KEY file for karpor server..") } func NewSyncerCommand(ctx context.Context) *cobra.Command { @@ -50,14 +77,14 @@ func NewSyncerCommand(ctx context.Context) *cobra.Command { Use: "syncer", Short: "start a resource syncer to sync resource from clusters", RunE: func(cmd *cobra.Command, args []string) error { - return run(ctx, options) + return runSyncer(ctx, options) }, } options.AddFlags(cmd.Flags()) return cmd } -func run(ctx context.Context, options *syncerOptions) error { +func runSyncer(ctx context.Context, options *syncerOptions) error { ctrl.SetLogger(klog.NewKlogr()) log := ctrl.Log.WithName("setup") @@ -81,8 +108,18 @@ func run(ctx context.Context, options *syncerOptions) error { return err } + var caCert *x509.Certificate + var caKey crypto.Signer + if 
options.HighAvailability && !options.OnlyPushMode { + caCert, caKey, err = certgenerator.LoadCertificate(options.CaCertFile, options.CaKeyFile) + if err != nil { + log.Error(err, "unable to load certificate") + return err + } + } + //nolint:contextcheck - if err = syncer.NewSyncReconciler(es).SetupWithManager(mgr); err != nil { + if err = syncer.NewSyncReconciler(es, options.HighAvailability, options.ElasticSearchAddresses, options.ExternalEndpoint, options.AgentImageTag, caCert, caKey).SetupWithManager(mgr); err != nil { log.Error(err, "unable to create resource syncer") return err } diff --git a/cmd/karpor/main.go b/cmd/karpor/main.go index 748bcd08..c6890468 100644 --- a/cmd/karpor/main.go +++ b/cmd/karpor/main.go @@ -19,9 +19,10 @@ package main import ( "os" - "github.com/KusionStack/karpor/cmd/karpor/app" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/component-base/cli" + + "github.com/KusionStack/karpor/cmd/karpor/app" ) // @title Karpor @@ -33,7 +34,10 @@ func main() { cmd := app.NewServerCommand(ctx) syncCmd := app.NewSyncerCommand(ctx) + agentCmd := app.NewAgentCommand(ctx) + cmd.AddCommand(syncCmd) + cmd.AddCommand(agentCmd) code := cli.Run(cmd) os.Exit(code) diff --git a/config/agent.tpl b/config/agent.tpl new file mode 100644 index 00000000..c20f68d6 --- /dev/null +++ b/config/agent.tpl @@ -0,0 +1,137 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: karpor +spec: + finalizers: + - kubernetes +{{- if eq .ClusterMode "pull" }} +--- +apiVersion: v1 +data: + config: |- + apiVersion: v1 + clusters: + - cluster: + insecure-skip-tls-verify: true + server: {{ .ExternalEndpoint }} + name: karpor + contexts: + - context: + cluster: karpor + user: {{ .ClusterName }} + name: default + current-context: default + kind: Config + users: + - name: {{ .ClusterName }} + user: + client-certificate-data: {{ .CaCert }} + client-key-data: {{ .CaKey }} +kind: ConfigMap +metadata: + name: karpor-kubeconfig + namespace: karpor +{{- end }} +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: karpor-agent + namespace: karpor +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: karpor-agent + app.kubernetes.io/instance: karpor + app.kubernetes.io/name: karpor + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/component: karpor-agent + app.kubernetes.io/instance: karpor + app.kubernetes.io/name: karpor + spec: + containers: + - args: + - agent + - --elastic-search-addresses={{ range .StorageAddresses }}{{.}} {{ end }} + - --cluster-name={{ .ClusterName }} + - --cluster-mode={{ .ClusterMode }} + command: + - /karpor +{{- if eq .ClusterMode "pull" }} + env: + - name: KUBECONFIG + value: /etc/karpor/config +{{- end }} + image: kusionstack/karpor:{{ .AgentImageTag }} + imagePullPolicy: IfNotPresent + name: karpor-agent + ports: + - containerPort: 7443 + protocol: TCP + resources: +{{- if eq .Level 3 }} + limits: + cpu: 1 + ephemeral-storage: 20Gi + memory: 2Gi + requests: + cpu: 500m + ephemeral-storage: 4Gi + memory: 512Mi +{{- else if eq .Level 2 }} + limits: + cpu: 500m + ephemeral-storage: 10Gi + memory: 1Gi + requests: + cpu: 250m + ephemeral-storage: 2Gi + memory: 256Mi +{{- else }} + limits: + cpu: 250m + ephemeral-storage: 5Gi + memory: 500Mi + requests: + cpu: 125m + ephemeral-storage: 1Gi + memory: 128Mi +{{- end }} +{{- if eq .ClusterMode "pull" }} + volumeMounts: + - mountPath: /etc/karpor/ + name: karpor-kubeconfig +{{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + terminationGracePeriodSeconds: 30 +{{- if eq .ClusterMode "pull" }} + volumes: + - configMap: + defaultMode: 420 + name: karpor-kubeconfig + name: karpor-kubeconfig +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: karpor +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: 
ServiceAccount + name: default + namespace: karpor diff --git a/config/crds/cluster.karpor.io_clusters.yaml b/config/crds/cluster.karpor.io_clusters.yaml new file mode 100644 index 00000000..435f8ea1 --- /dev/null +++ b/config/crds/cluster.karpor.io_clusters.yaml @@ -0,0 +1,139 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: clusters.cluster.karpor.io +spec: + group: cluster.karpor.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: Cluster is an extension type to access a cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + access: + properties: + caBundle: + format: byte + type: string + credential: + properties: + execConfig: + properties: + apiVersion: + type: string + args: + items: + type: string + type: array + command: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + installHint: + type: string + interactiveMode: + type: string + provideClusterInfo: + type: boolean + required: + - args + - command + - env + - provideClusterInfo + type: object + serviceAccountToken: + type: string + type: + type: string + x509: + properties: + certificate: + format: byte + type: string + privateKey: + format: byte + type: string + required: + - certificate + - privateKey + type: object + required: + - type + type: object + endpoint: + type: string + insecure: + type: boolean + required: + - endpoint + type: object + description: + type: string + displayName: + type: string + finalized: + type: boolean + level: + description: cluster scale level, optional value 1, 2, 3, default + 1 + type: integer + mode: + type: string + provider: + type: string + required: + - access + - displayName + - level + - provider + type: object + status: + properties: + healthy: + type: boolean + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/search.karpor.io_syncregistries.yaml b/config/crds/search.karpor.io_syncregistries.yaml new file mode 100644 index 00000000..25421f72 --- /dev/null +++ b/config/crds/search.karpor.io_syncregistries.yaml @@ -0,0 +1,286 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: syncregistries.search.karpor.io +spec: + 
group: search.karpor.io + names: + kind: SyncRegistry + listKind: SyncRegistryList + plural: syncregistries + singular: syncregistry + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + clusterLabelSelector: + description: ClusterLabelSelector is used to filter the target clusters + that need to be synced from. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + clusters: + description: Clusters is the list of the target clusters to be be + synced from. + items: + type: string + type: array + syncResources: + items: + description: ResourceSyncRule is used to specify the way to sync + the specified resource + properties: + apiVersion: + description: APIVersion represents the group version of the + target resource. + type: string + maxConcurrent: + description: 'MaxConcurrent is the maximum number of workers + (default: 10)' + type: integer + namespace: + description: |- + Namespace specifies the namespace in which the ListWatch of the target resources is limited + to. + type: string + remainAfterDeleted: + description: RemainAfterDeleted indicates whether the resource + should remain in ES after being deleted in k8s. + type: boolean + resource: + description: Resource is the the target resource. + type: string + resyncPeriod: + description: ResynPeriod is the period to resync + type: string + selectors: + description: Selectors are used to filter the target resources + to sync. Multiple selectors are ORed. + items: + description: Selector represents a resource filter + properties: + fieldSelector: + description: |- + FieldSelector is a filter to select resources by fields. + If non-nil and non-empty, only the resource match this filter will be selected. + properties: + matchFields: + additionalProperties: + type: string + description: |- + MatchFields is a map of {field,value} pairs. 
A single {field,value} in the matchFields + map means that the specified field should have an exact match with the specified value. + Multiple entries are ANDed. + type: object + serverSupported: + description: |- + ServerSupported indicates whether the matchFields is supported by the API server. + If not supported, the client-side filtering will be utilized instead." + type: boolean + type: object + labelSelector: + description: |- + LabelSelector is a filter to select resources by labels. + If non-nil and non-empty, only the resource match this filter will be selected. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + transform: + description: |- + Transform is the rule applied to the original resource to transform it to the desired target + resource. + properties: + type: + description: Type is the type of transformer. + type: string + valueTemplate: + description: ValueTemplate is the template of the input + data to be paased to the transformer + type: string + required: + - type + - valueTemplate + type: object + transformRefName: + description: TransformRefName is the name of the TransformRule + type: string + trim: + description: Trim defines the trimming strategy for the resources + of the current type. + properties: + retain: + description: Retain specifies which fields should be retained + after trimming. + properties: + jsonPaths: + description: |- + JSONPaths specifies the path of the field to be retained. + For usage, please refer to https://kubernetes.io/docs/reference/kubectl/jsonpath/ + items: + type: string + type: array + type: object + type: object + trimRefName: + description: TrimRefName is the name of the TrimRule. 
+ type: string + required: + - apiVersion + - resource + type: object + type: array + syncResourcesRefName: + type: string + type: object + status: + properties: + clusters: + items: + properties: + cluster: + type: string + resources: + items: + properties: + apiVersion: + type: string + kind: + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + required: + - apiVersion + - kind + - lastTransitionTime + - status + type: object + type: array + status: + type: string + required: + - cluster + - status + type: object + type: array + lastTransitionTime: + format: date-time + type: string + required: + - lastTransitionTime + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/search.karpor.io_syncresources.yaml b/config/crds/search.karpor.io_syncresources.yaml new file mode 100644 index 00000000..bcfa063b --- /dev/null +++ b/config/crds/search.karpor.io_syncresources.yaml @@ -0,0 +1,188 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: syncresources.search.karpor.io +spec: + group: search.karpor.io + names: + kind: SyncResources + listKind: SyncResourcesList + plural: syncresources + singular: syncresources + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + syncResources: + items: + description: ResourceSyncRule is used to specify the way to sync + the specified resource + properties: + apiVersion: + description: APIVersion represents the group version of the + target resource. + type: string + maxConcurrent: + description: 'MaxConcurrent is the maximum number of workers + (default: 10)' + type: integer + namespace: + description: |- + Namespace specifies the namespace in which the ListWatch of the target resources is limited + to. + type: string + remainAfterDeleted: + description: RemainAfterDeleted indicates whether the resource + should remain in ES after being deleted in k8s. + type: boolean + resource: + description: Resource is the the target resource. + type: string + resyncPeriod: + description: ResynPeriod is the period to resync + type: string + selectors: + description: Selectors are used to filter the target resources + to sync. Multiple selectors are ORed. + items: + description: Selector represents a resource filter + properties: + fieldSelector: + description: |- + FieldSelector is a filter to select resources by fields. + If non-nil and non-empty, only the resource match this filter will be selected. + properties: + matchFields: + additionalProperties: + type: string + description: |- + MatchFields is a map of {field,value} pairs. A single {field,value} in the matchFields + map means that the specified field should have an exact match with the specified value. + Multiple entries are ANDed. + type: object + serverSupported: + description: |- + ServerSupported indicates whether the matchFields is supported by the API server. + If not supported, the client-side filtering will be utilized instead." 
+ type: boolean + type: object + labelSelector: + description: |- + LabelSelector is a filter to select resources by labels. + If non-nil and non-empty, only the resource match this filter will be selected. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + transform: + description: |- + Transform is the rule applied to the original resource to transform it to the desired target + resource. + properties: + type: + description: Type is the type of transformer. 
+ type: string + valueTemplate: + description: ValueTemplate is the template of the input + data to be paased to the transformer + type: string + required: + - type + - valueTemplate + type: object + transformRefName: + description: TransformRefName is the name of the TransformRule + type: string + trim: + description: Trim defines the trimming strategy for the resources + of the current type. + properties: + retain: + description: Retain specifies which fields should be retained + after trimming. + properties: + jsonPaths: + description: |- + JSONPaths specifies the path of the field to be retained. + For usage, please refer to https://kubernetes.io/docs/reference/kubectl/jsonpath/ + items: + type: string + type: array + type: object + type: object + trimRefName: + description: TrimRefName is the name of the TrimRule. + type: string + required: + - apiVersion + - resource + type: object + type: array + type: object + type: object + served: true + storage: true diff --git a/config/crds/search.karpor.io_transformrules.yaml b/config/crds/search.karpor.io_transformrules.yaml new file mode 100644 index 00000000..811e3388 --- /dev/null +++ b/config/crds/search.karpor.io_transformrules.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: transformrules.search.karpor.io +spec: + group: search.karpor.io + names: + kind: TransformRule + listKind: TransformRuleList + plural: transformrules + singular: transformrule + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: |- + TransformRule is used to define the rule to transform the original resource into the desired + target resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + type: + description: Type is the type of transformer. + type: string + valueTemplate: + description: ValueTemplate is the template of the input data to be + paased to the transformer + type: string + required: + - type + - valueTemplate + type: object + type: object + served: true + storage: true diff --git a/config/crds/search.karpor.io_trimrules.yaml b/config/crds/search.karpor.io_trimrules.yaml new file mode 100644 index 00000000..72dbf84f --- /dev/null +++ b/config/crds/search.karpor.io_trimrules.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: trimrules.search.karpor.io +spec: + group: search.karpor.io + names: + kind: TrimRule + listKind: TrimRuleList + plural: trimrules + singular: trimrule + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: |- + TrimRule defines the strategy of trimming k8s objects, which can save + informer memory by discarding redundant fields. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + retain: + description: Retain specifies which fields should be retained after + trimming. + properties: + jsonPaths: + description: |- + JSONPaths specifies the path of the field to be retained. + For usage, please refer to https://kubernetes.io/docs/reference/kubectl/jsonpath/ + items: + type: string + type: array + type: object + type: object + type: object + served: true + storage: true diff --git a/config/embed.go b/config/embed.go index 5c3ac5bb..98d453f6 100644 --- a/config/embed.go +++ b/config/embed.go @@ -32,3 +32,23 @@ var DefaultRelationship []byte //go:embed default-sync-strategy.yaml var DefaultSyncStrategy []byte + +var CrdList = [][]byte{ClustersCrd, SyncRegistriesCrd, SyncResourcesCrd, TransformRulesCrd, TrimRulesCrd} + +//go:embed crds/cluster.karpor.io_clusters.yaml +var ClustersCrd []byte + +//go:embed crds/search.karpor.io_syncregistries.yaml +var SyncRegistriesCrd []byte + +//go:embed crds/search.karpor.io_syncresources.yaml +var SyncResourcesCrd []byte + +//go:embed crds/search.karpor.io_transformrules.yaml +var TransformRulesCrd []byte + +//go:embed crds/search.karpor.io_trimrules.yaml +var TrimRulesCrd []byte + +//go:embed agent.tpl +var AgentTpl []byte diff --git a/docs/api.md b/docs/api.md index a1b0cb59..debd6b3d 100644 --- a/docs/api.md +++ b/docs/api.md @@ -46,6 +46,7 @@ Karpor is a brand new Kubernetes visualization tool that focuses on search, insi |---------|---------|--------|---------| | DELETE | 
/rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | | GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName}/agentYml | [get rest API v1 cluster cluster name agent yml](#get-rest-api-v1-cluster-cluster-name-agent-yml) | Get agent yaml | | GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | | POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | | POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | @@ -657,6 +658,94 @@ Status: Internal Server Error +### Get agent yaml (*GetRestAPIV1ClusterClusterNameAgentYml*) + +``` +GET /rest-api/v1/cluster/{clusterName}/agentYml +``` + +Obtain the agent yaml in secret for cluster. 
+ +#### Consumes + * application/json + * text/plain + +#### Produces + * application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +|------|--------|------|---------|-----------| :------: |---------|-------------| +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses +| Code | Status | Description | Has headers | Schema | +|------|--------|-------------|:-----------:|--------| +| [200](#get-rest-api-v1-cluster-cluster-name-agent-yml-200) | OK | Verification passed server version | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-agent-yml-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-agent-yml-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-agent-yml-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-404-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-agent-yml-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-agent-yml-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-agent-yml-500-schema) | + +#### Responses + + +##### 200 - Verification passed server version +Status: OK + +###### Schema + + + + + +##### 400 - Bad Request +Status: Bad Request + +###### Schema + + + + + +##### 401 - Unauthorized +Status: Unauthorized + +###### Schema + + + + + +##### 404 - Not Found +Status: Not Found + +###### Schema + + + + + +##### 429 - Too Many Requests +Status: Too Many Requests + +###### Schema + + + + + +##### 500 - Internal Server Error +Status: Internal Server 
Error + +###### Schema + + + + + ### List lists all cluster resources. (*GetRestAPIV1Clusters*) ``` @@ -2228,6 +2317,8 @@ Uploads a KubeConfig file for cluster, with a maximum size of 2MB. | Name | Source | Type | Go type | Separator | Required | Default | Description | |------|--------|------|---------|-----------| :------: |---------|-------------| +| clusterLevel | `formData` | integer | `int64` | | ✓ | | cluster scale level | +| clusterMode | `formData` | string | `string` | | ✓ | | cluster mode | | description | `formData` | string | `string` | | ✓ | | cluster description | | displayName | `formData` | string | `string` | | ✓ | | cluster display name | | file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | @@ -2801,6 +2892,8 @@ Status: Internal Server Error | Name | Type | Go type | Required | Default | Description | Example | |------|------|---------|:--------:| ------- |-------------|---------| +| clusterLevel | integer| `int64` | | | clusterLevel is the scale level of cluster to be created | | +| clusterMode | string| `string` | | | ClusterMode is the mode of cluster to be created | | | description | string| `string` | | | ClusterDescription is the description of cluster to be created | | | displayName | string| `string` | | | ClusterDisplayName is the display name of cluster to be created | | | kubeConfig | string| `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | @@ -2924,10 +3017,16 @@ Status: Internal Server Error | Name | Type | Go type | Required | Default | Description | Example | |------|------|---------|:--------:| ------- |-------------|---------| -| issuesTotal | integer| `int64` | | | IssuesTotal is the total count of all issues found during the audit.
This count can be used to understand the overall number of problems
that need to be addressed. | | +| issuesTotal | integer| `int64` | | | IssuesTotal is the total count of all issues found during the audit. +This count can be used to understand the overall number of problems +that need to be addressed. | | | resourceTotal | integer| `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | -| score | number| `float64` | | | Score represents the calculated score of the audited manifest based on
the number and severity of issues. It provides a quantitative measure
of the security posture of the resources in the manifest. | | -| severityStatistic | map of integer| `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective
number of occurrences. It allows for a quick overview of the distribution
of issues across different severity categories. | | +| score | number| `float64` | | | Score represents the calculated score of the audited manifest based on +the number and severity of issues. It provides a quantitative measure +of the security posture of the resources in the manifest. | | +| severityStatistic | map of integer| `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective +number of occurrences. It allows for a quick overview of the distribution +of issues across different severity categories. | | @@ -3010,5 +3109,8 @@ Status: Internal Server Error | Name | Type | Go type | Required | Default | Description | Example | |------|------|---------|:--------:| ------- |-------------|---------| -| object | [interface{}](#interface)| `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or
map[string]interface{}
children. | | +| object | [interface{}](#interface)| `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or +map[string]interface{} +children. | | + diff --git a/docs/cli/karpor.md b/docs/cli/karpor.md index 41c5e576..f3a2b2a1 100644 --- a/docs/cli/karpor.md +++ b/docs/cli/karpor.md @@ -15,10 +15,14 @@ karpor [flags] ``` --admission-control-config-file string File with admission control configuration. --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. - --ai-auth-token string The ai auth token (same as api key) + --ai-auth-token string The ai auth token --ai-backend string The ai backend (default "openai") --ai-base-url string The ai base url + --ai-http-proxy string The ai http proxy + --ai-https-proxy string The ai https proxy --ai-model string The ai model (default "gpt-3.5-turbo") + --ai-no-proxy string The ai no-proxy + --ai-proxy-enabled The ai proxy enable --ai-temperature float32 The ai temperature (default 1) --ai-top-p float32 The ai top-p (default 1) --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) @@ -32,7 +36,7 @@ karpor [flags] --audit-log-compress If set, the rotated log files will be compressed using gzip. --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. 
- --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxbackup int The maximum number of old audit log files to retain. --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. @@ -65,18 +69,19 @@ karpor [flags] --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. --contention-profiling Enable lock contention profiling, if profiling is enabled --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. (default [.*]) + --default-watch-cache-size int Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set. (default 100) --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) - --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. 
The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook]) --egress-selector-config-file string File with apiserver egress selector configuration. --elastic-search-addresses strings The elastic search address --elastic-search-password string The elastic search password --elastic-search-username string The elastic search username - --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-ai whether to enable llm. --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. 
(default true) --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) --enable-rbac trun on to enable RBAC authorization --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd - --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. --etcd-certfile string SSL certification file used to secure etcd communication. --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) @@ -85,7 +90,6 @@ karpor [flags] --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) --etcd-keyfile string SSL key file used to secure etcd communication. --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") - --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). 
@@ -93,113 +97,109 @@ karpor [flags] APIListChunking=true|false (BETA - default=true) APIPriorityAndFairness=true|false (BETA - default=true) APIResponseCompression=true|false (BETA - default=true) - APISelfSubjectReview=true|false (ALPHA - default=false) - APIServerIdentity=true|false (BETA - default=true) + APIServerIdentity=true|false (ALPHA - default=false) APIServerTracing=true|false (ALPHA - default=false) - AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) AllAlpha=true|false (ALPHA - default=false) AllBeta=true|false (BETA - default=false) - AnyVolumeDataSource=true|false (BETA - default=true) + AnyVolumeDataSource=true|false (ALPHA - default=false) AppArmor=true|false (BETA - default=true) - CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) - CPUManagerPolicyBetaOptions=true|false (BETA - default=true) - CPUManagerPolicyOptions=true|false (BETA - default=true) - CSIMigrationPortworx=true|false (BETA - default=false) - CSIMigrationRBD=true|false (ALPHA - default=false) - CSINodeExpandSecret=true|false (ALPHA - default=false) + CPUManager=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (ALPHA - default=false) + CSIInlineVolume=true|false (BETA - default=true) + CSIMigration=true|false (BETA - default=true) + CSIMigrationAWS=true|false (BETA - default=false) + CSIMigrationAzureDisk=true|false (BETA - default=false) + CSIMigrationAzureFile=true|false (BETA - default=false) + CSIMigrationGCE=true|false (BETA - default=false) + CSIMigrationOpenStack=true|false (BETA - default=true) + CSIMigrationvSphere=true|false (BETA - default=false) + CSIStorageCapacity=true|false (BETA - default=true) + CSIVolumeFSGroupPolicy=true|false (BETA - default=true) CSIVolumeHealth=true|false (ALPHA - default=false) - ComponentSLIs=true|false (ALPHA - default=false) - ContainerCheckpoint=true|false (ALPHA - default=false) - CronJobTimeZone=true|false (BETA - default=true) - CrossNamespaceVolumeDataSource=true|false (ALPHA - 
default=false) + CSRDuration=true|false (BETA - default=true) + ConfigurableFSGroupPolicy=true|false (BETA - default=true) + ControllerManagerLeaderMigration=true|false (BETA - default=true) CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) - CustomResourceValidationExpressions=true|false (BETA - default=true) + DaemonSetUpdateSurge=true|false (BETA - default=true) + DefaultPodTopologySpread=true|false (BETA - default=true) + DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false) + DevicePlugins=true|false (BETA - default=true) + DisableAcceleratorUsageMetrics=true|false (BETA - default=true) DisableCloudProviders=true|false (ALPHA - default=false) - DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) - DownwardAPIHugePages=true|false (BETA - default=true) - DynamicResourceAllocation=true|false (ALPHA - default=false) - EventedPLEG=true|false (ALPHA - default=false) - ExpandedDNSConfig=true|false (BETA - default=true) + DownwardAPIHugePages=true|false (BETA - default=false) + EfficientWatchResumption=true|false (BETA - default=true) + EndpointSliceTerminatingCondition=true|false (BETA - default=true) + EphemeralContainers=true|false (ALPHA - default=false) + ExpandCSIVolumes=true|false (BETA - default=true) + ExpandInUsePersistentVolumes=true|false (BETA - default=true) + ExpandPersistentVolumes=true|false (BETA - default=true) + ExpandedDNSConfig=true|false (ALPHA - default=false) ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) - GRPCContainerProbe=true|false (BETA - default=true) + GenericEphemeralVolume=true|false (BETA - default=true) GracefulNodeShutdown=true|false (BETA - default=true) - GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) HPAContainerMetrics=true|false (ALPHA - default=false) HPAScaleToZero=true|false (ALPHA - default=false) - HonorPVReclaimPolicy=true|false (ALPHA - default=false) - IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + 
IPv6DualStack=true|false (BETA - default=true) InTreePluginAWSUnregister=true|false (ALPHA - default=false) InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) InTreePluginGCEUnregister=true|false (ALPHA - default=false) InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) - InTreePluginPortworxUnregister=true|false (ALPHA - default=false) - InTreePluginRBDUnregister=true|false (ALPHA - default=false) InTreePluginvSphereUnregister=true|false (ALPHA - default=false) - JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) - JobPodFailurePolicy=true|false (BETA - default=true) - JobReadyPods=true|false (BETA - default=true) - KMSv2=true|false (ALPHA - default=false) + IndexedJob=true|false (BETA - default=true) + IngressClassNamespacedParams=true|false (BETA - default=true) + JobTrackingWithFinalizers=true|false (ALPHA - default=false) + KubeletCredentialProviders=true|false (ALPHA - default=false) KubeletInUserNamespace=true|false (ALPHA - default=false) KubeletPodResources=true|false (BETA - default=true) - KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) - KubeletTracing=true|false (ALPHA - default=false) - LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false) + LocalStorageCapacityIsolation=true|false (BETA - default=true) LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) LogarithmicScaleDown=true|false (BETA - default=true) - MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) - MaxUnavailableStatefulSet=true|false (ALPHA - default=false) MemoryManager=true|false (BETA - default=true) MemoryQoS=true|false (ALPHA - default=false) - MinDomainsInPodTopologySpread=true|false (BETA - default=false) - MinimizeIPTablesRestore=true|false (ALPHA - default=false) - MultiCIDRRangeAllocator=true|false (ALPHA - 
default=false) - NetworkPolicyStatus=true|false (ALPHA - default=false) - NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) - NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + MixedProtocolLBService=true|false (ALPHA - default=false) + NetworkPolicyEndPort=true|false (BETA - default=true) NodeSwap=true|false (ALPHA - default=false) - OpenAPIEnums=true|false (BETA - default=true) - OpenAPIV3=true|false (BETA - default=true) - PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) - PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + NonPreemptingPriority=true|false (BETA - default=true) + PodAffinityNamespaceSelector=true|false (BETA - default=true) PodDeletionCost=true|false (BETA - default=true) - PodDisruptionConditions=true|false (BETA - default=true) - PodHasNetworkCondition=true|false (ALPHA - default=false) - PodSchedulingReadiness=true|false (ALPHA - default=false) - ProbeTerminationGracePeriod=true|false (BETA - default=true) + PodOverhead=true|false (BETA - default=true) + PodSecurity=true|false (ALPHA - default=false) + PreferNominatedNode=true|false (BETA - default=true) + ProbeTerminationGracePeriod=true|false (BETA - default=false) ProcMountType=true|false (ALPHA - default=false) - ProxyTerminatingEndpoints=true|false (BETA - default=true) + ProxyTerminatingEndpoints=true|false (ALPHA - default=false) QOSReserved=true|false (ALPHA - default=false) ReadWriteOncePod=true|false (ALPHA - default=false) - RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) RemainingItemCount=true|false (BETA - default=true) - RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RemoveSelfLink=true|false (BETA - default=true) RotateKubeletServerCertificate=true|false (BETA - default=true) - SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) - SeccompDefault=true|false (BETA - default=true) - ServerSideFieldValidation=true|false (BETA - default=true) + SeccompDefault=true|false 
(ALPHA - default=false) + ServiceInternalTrafficPolicy=true|false (BETA - default=true) + ServiceLBNodePortControl=true|false (BETA - default=true) + ServiceLoadBalancerClass=true|false (BETA - default=true) SizeMemoryBackedVolumes=true|false (BETA - default=true) - StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) - StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StatefulSetMinReadySeconds=true|false (ALPHA - default=false) StorageVersionAPI=true|false (ALPHA - default=false) StorageVersionHash=true|false (BETA - default=true) - TopologyAwareHints=true|false (BETA - default=true) + SuspendJob=true|false (BETA - default=true) + TTLAfterFinished=true|false (BETA - default=true) + TopologyAwareHints=true|false (ALPHA - default=false) TopologyManager=true|false (BETA - default=true) - TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) - TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) - TopologyManagerPolicyOptions=true|false (ALPHA - default=false) - UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) - ValidatingAdmissionPolicy=true|false (ALPHA - default=false) VolumeCapacityPriority=true|false (ALPHA - default=false) WinDSR=true|false (ALPHA - default=false) WinOverlay=true|false (BETA - default=true) - WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + WindowsHostProcessContainers=true|false (ALPHA - default=false) (default APIPriorityAndFairness=true) --github-badge whether to display the github badge --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. 
Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. -h, --help help for karpor + --high-availability whether to use high-availability feature. --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) --lease-reuse-duration-seconds int The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --master-service-namespace string DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods. (default "default") --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. 
(default 400) --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) @@ -217,31 +217,30 @@ karpor [flags] --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) --service-account-extend-token-expiration Turns on projected service account expiration extension during token generation, which helps safe transition from legacy token to bound service account token feature. If this flag is enabled, admission injected tokens would be extended up to 1 year to prevent unexpected failure during transition, ignoring value of service-account-max-token-expiration. (default true) --service-account-issuer stringArray Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. If this option is not a valid URI per the OpenID Discovery 1.0 spec, the ServiceAccountIssuerDiscovery feature will remain disabled, even if the feature gate is set to true. It is highly recommended that this value comply with the OpenID spec: https://openid.net/specs/openid-connect-discovery-1_0.html. In practice, this means that service-account-issuer must be an https URL. It is also highly recommended that this URL be capable of serving OpenID discovery documents at {service-account-issuer}/.well-known/openid-configuration. When this flag is specified multiple times, the first is used to generate tokens and all are used to determine which issuers are accepted. - --service-account-jwks-uri string Overrides the URI for the JSON Web Key Set in the discovery doc served at /.well-known/openid-configuration. 
This flag is useful if the discovery docand key set are served to relying parties from a URL other than the API server's external (as auto-detected or overridden with external-hostname). - --service-account-key-file stringArray File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. If unspecified, --tls-private-key-file is used. Must be specified when --service-account-signing-key-file is provided + --service-account-jwks-uri string Overrides the URI for the JSON Web Key Set in the discovery doc served at /.well-known/openid-configuration. This flag is useful if the discovery docand key set are served to relying parties from a URL other than the API server's external (as auto-detected or overridden with external-hostname). Only valid if the ServiceAccountIssuerDiscovery feature gate is enabled. + --service-account-key-file stringArray File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. If unspecified, --tls-private-key-file is used. Must be specified when --service-account-signing-key is provided --service-account-lookup If true, validate ServiceAccount tokens exist in etcd as part of authentication. (default true) --service-account-max-token-expiration duration The maximum validity duration of a token created by the service account token issuer. If an otherwise valid TokenRequest with a validity duration larger than this value is requested, a token will be issued with a validity duration of this value. --service-account-signing-key-file string Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. 
--shutdown-delay-duration duration Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. - --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). - --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. (default "application/json") --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. 
(default "apiserver.local.config/certificates/apiserver.crt") --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. - Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256. - Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. 
+ Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. (default "apiserver.local.config/certificates/apiserver.key") --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) --tracing-config-file string File with apiserver tracing configuration. -V, --version Print version and exit --watch-cache Enable watch caching in the apiserver (default true) - --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. 
The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. It takes effect when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size ``` ### SEE ALSO * [karpor syncer](karpor_syncer.md) - start a resource syncer to sync resource from clusters -###### Auto generated by spf13/cobra on 27-Nov-2024 +###### Auto generated by spf13/cobra on 12-Mar-2025 diff --git a/docs/cli/karpor_syncer.md b/docs/cli/karpor_syncer.md index ce2063d2..97cd7fe4 100644 --- a/docs/cli/karpor_syncer.md +++ b/docs/cli/karpor_syncer.md @@ -9,14 +9,26 @@ karpor syncer [flags] ### Options ``` + --agent-image-tag string The agent image tag. (default "v0.0.0") + --ca-cert-file string Root CA certificate file for karpor server. (default "/etc/karpor/ca/ca.crt") + --ca-key-file string Root KEY file for karpor server.. (default "/etc/karpor/ca/ca.key") --elastic-search-addresses strings The elastic search address. + --external-addresses string The external address that expose to user cluster in pull mode. --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") -h, --help help for syncer + --high-availability Whether to use high-availability feature. --metrics-bind-address string The address the metric endpoint binds to. (default ":8080") + --only-push-mode Only push mode in high availability feature. 
+``` + +### Options inherited from parent commands + +``` + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) ``` ### SEE ALSO * [karpor](karpor.md) - Launch an API server -###### Auto generated by spf13/cobra on 27-Nov-2024 +###### Auto generated by spf13/cobra on 12-Mar-2025 diff --git a/pkg/core/handler/cluster/cluster.go b/pkg/core/handler/cluster/cluster.go index ff4f376e..b70ab76f 100644 --- a/pkg/core/handler/cluster/cluster.go +++ b/pkg/core/handler/cluster/cluster.go @@ -20,11 +20,6 @@ import ( "strconv" "strings" - "github.com/KusionStack/karpor/pkg/core/handler" - "github.com/KusionStack/karpor/pkg/core/manager/cluster" - "github.com/KusionStack/karpor/pkg/infra/multicluster" - "github.com/KusionStack/karpor/pkg/util/clusterinstall" - "github.com/KusionStack/karpor/pkg/util/ctxutil" "github.com/go-chi/chi/v5" "github.com/pkg/errors" _ "k8s.io/api/core/v1" @@ -33,6 +28,12 @@ import ( "k8s.io/apiserver/pkg/server" "k8s.io/client-go/tools/clientcmd" k8syaml "sigs.k8s.io/yaml" + + "github.com/KusionStack/karpor/pkg/core/handler" + "github.com/KusionStack/karpor/pkg/core/manager/cluster" + "github.com/KusionStack/karpor/pkg/infra/multicluster" + "github.com/KusionStack/karpor/pkg/util/clusterinstall" + "github.com/KusionStack/karpor/pkg/util/ctxutil" ) // Get returns an HTTP handler function that reads a cluster @@ -112,7 +113,8 @@ func Create(clusterMgr *cluster.ClusterManager, c *server.CompletedConfig) http. 
} client, _ := multicluster.BuildMultiClusterClient(r.Context(), c.LoopbackClientConfig, "") - clusterCreated, err := clusterMgr.CreateCluster(r.Context(), client, cluster, payload.ClusterDisplayName, payload.ClusterDescription, payload.ClusterKubeConfig) + clusterCreated, err := clusterMgr.CreateCluster(r.Context(), client, cluster, payload.ClusterDisplayName, payload.ClusterDescription, + payload.ClusterMode, payload.ClusterKubeConfig, payload.ClusterLevel) handler.HandleResult(w, r, ctx, err, clusterCreated) } } @@ -242,13 +244,15 @@ func Delete(clusterMgr *cluster.ClusterManager, c *server.CompletedConfig) http. // @Tags cluster // @Accept multipart/form-data // @Produce plain -// @Param file formData file true "Upload file with field name 'file'" -// @Param name formData string true "cluster name" -// @Param displayName formData string true "cluster display name" -// @Param description formData string true "cluster description" -// @Success 200 {object} UploadData "Returns the content of the uploaded KubeConfig file." -// @Failure 400 {string} string "The uploaded file is too large or the request is invalid." -// @Failure 500 {string} string "Internal server error." +// @Param file formData file true "Upload file with field name 'file'" +// @Param name formData string true "cluster name" +// @Param displayName formData string true "cluster display name" +// @Param description formData string true "cluster description" +// @Param clusterMode formData string true "cluster mode" +// @Param clusterLevel formData int true "cluster scale level" +// @Success 200 {object} UploadData "Returns the content of the uploaded KubeConfig file." +// @Failure 400 {string} string "The uploaded file is too large or the request is invalid." +// @Failure 500 {string} string "Internal server error." 
// @Router /rest-api/v1/cluster/config/file [post] func UploadKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { @@ -271,6 +275,13 @@ func UploadKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { name := r.FormValue("name") displayName := r.FormValue("displayName") description := r.FormValue("description") + clusterMode := r.FormValue("clusterMode") + clusterLevel := r.FormValue("clusterLevel") + level, err := strconv.Atoi(clusterLevel) + if err != nil { + log.Info("failed to parse cluster level") + level = 1 + } file, fileHeader, err := r.FormFile("file") if err != nil { handler.FailureRender(ctx, w, r, errors.Wrapf(err, "failed to get uploaded file")) @@ -297,7 +308,7 @@ func UploadKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { } // Convert the rest.Config to Cluster object. - clusterObj, err := clusterinstall.ConvertKubeconfigToCluster(name, displayName, description, restConfig) + clusterObj, err := clusterinstall.ConvertKubeconfigToCluster(name, displayName, description, clusterMode, level, restConfig) if err != nil { handler.FailureRender(ctx, w, r, errors.Wrapf(err, "error convert kubeconfig to cluster")) return @@ -343,12 +354,12 @@ func UploadKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { // @Accept json // @Produce json // @Param request body ValidatePayload true "KubeConfig payload to validate" -// @Success 200 {string} string "Verification passed server version" -// @Failure 400 {object} string "Bad Request" -// @Failure 401 {object} string "Unauthorized" -// @Failure 429 {object} string "Too Many Requests" -// @Failure 404 {object} string "Not Found" -// @Failure 500 {object} string "Internal Server Error" +// @Success 200 {string} string "Verification passed server version" +// @Failure 400 {object} string "Bad Request" +// @Failure 401 {object} string "Unauthorized" +// @Failure 429 {object} string "Too Many Requests" +// @Failure 
404 {object} string "Not Found" +// @Failure 500 {object} string "Internal Server Error" // @Router /rest-api/v1/cluster/config/validate [post] func ValidateKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { @@ -381,3 +392,40 @@ func ValidateKubeConfig(clusterMgr *cluster.ClusterManager) http.HandlerFunc { } } } + +// GetAgentYml returns an HTTP handler function to obtain the agent yaml of the special cluster. +// +// @Summary Get agent yaml +// @Description Obtain the agent yaml in secret for cluster. +// @Tags cluster +// @Accept plain +// @Accept json +// @Produce json +// @Param clusterName path string true "The name of the cluster" +// @Success 200 {string} string "Verification passed server version" +// @Failure 400 {object} string "Bad Request" +// @Failure 401 {object} string "Unauthorized" +// @Failure 429 {object} string "Too Many Requests" +// @Failure 404 {object} string "Not Found" +// @Failure 500 {object} string "Internal Server Error" +// @Router /rest-api/v1/cluster/{clusterName}/agentYml [get] +func GetAgentYml(clusterMgr *cluster.ClusterManager, c *server.CompletedConfig) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Extract the context and logger from the request. 
+ ctx := r.Context() + logger := ctxutil.GetLogger(ctx) + cluster := chi.URLParam(r, "clusterName") + logger.Info("Getting cluster...", "cluster", cluster) + + client, err := multicluster.BuildHubClients(r.Context(), c.LoopbackClientConfig) + if err != nil { + handler.FailureRender(ctx, w, r, err) + return + } + + agentYaml, err := clusterMgr.GetAgentYamlForCluster(r.Context(), client, cluster) + handler.HandleResult(w, r, ctx, err, map[string]string{ + "agentYml": string(agentYaml), + }) + } +} diff --git a/pkg/core/handler/cluster/types.go b/pkg/core/handler/cluster/types.go index e61bd5a6..85c6542a 100644 --- a/pkg/core/handler/cluster/types.go +++ b/pkg/core/handler/cluster/types.go @@ -33,9 +33,11 @@ var sortCriteriaMap = map[string]cluster.SortCriteria{ // //nolint:tagliatelle type ClusterPayload struct { - ClusterDisplayName string `json:"displayName"` // ClusterDisplayName is the display name of cluster to be created - ClusterDescription string `json:"description"` // ClusterDescription is the description of cluster to be created - ClusterKubeConfig string `json:"kubeConfig"` // ClusterKubeConfig is the kubeconfig of cluster to be created + ClusterDisplayName string `json:"displayName"` // ClusterDisplayName is the display name of cluster to be created + ClusterDescription string `json:"description"` // ClusterDescription is the description of cluster to be created + ClusterKubeConfig string `json:"kubeConfig"` // ClusterKubeConfig is the kubeconfig of cluster to be created + ClusterMode string `json:"clusterMode"` // ClusterMode is the mode of cluster to be created + ClusterLevel int `json:"clusterLevel"` // clusterLevel is the scale level of cluster to be created } type UploadData struct { diff --git a/pkg/core/manager/cluster/manager.go b/pkg/core/manager/cluster/manager.go index 84faea32..b810b5cb 100644 --- a/pkg/core/manager/cluster/manager.go +++ b/pkg/core/manager/cluster/manager.go @@ -20,11 +20,6 @@ import ( "fmt" "time" - 
"github.com/KusionStack/karpor/pkg/core/handler" - "github.com/KusionStack/karpor/pkg/infra/multicluster" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" - "github.com/KusionStack/karpor/pkg/util/clusterinstall" - "github.com/KusionStack/karpor/pkg/util/ctxutil" errors2 "github.com/pkg/errors" yaml "gopkg.in/yaml.v3" v1 "k8s.io/api/core/v1" @@ -35,7 +30,14 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" k8syaml "sigs.k8s.io/yaml" + + "github.com/KusionStack/karpor/pkg/core/handler" + "github.com/KusionStack/karpor/pkg/infra/multicluster" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + "github.com/KusionStack/karpor/pkg/util/clusterinstall" + "github.com/KusionStack/karpor/pkg/util/ctxutil" ) type ClusterManager struct{} @@ -68,7 +70,7 @@ func (c *ClusterManager) GetCluster( func (c *ClusterManager) CreateCluster( ctx context.Context, client *multicluster.MultiClusterClient, - name, displayName, description, kubeconfig string, + name, displayName, description, clusterMode, kubeconfig string, clusterLevel int, ) (*unstructured.Unstructured, error) { clusterGVR := clusterv1beta1.SchemeGroupVersion.WithResource("clusters") // Make sure the cluster does not exist first @@ -91,6 +93,8 @@ func (c *ClusterManager) CreateCluster( name, displayName, description, + clusterMode, + clusterLevel, restConfig, ) if err != nil { @@ -142,6 +146,8 @@ func (c *ClusterManager) UpdateCredential( } displayName := currentObj.Object["spec"].(map[string]interface{})["displayName"].(string) description := currentObj.Object["spec"].(map[string]interface{})["description"].(string) + clusterMode := currentObj.Object["spec"].(map[string]interface{})["mode"].(string) + clusterLevel := currentObj.Object["spec"].(map[string]interface{})["level"].(int) // Create new restConfig from updated kubeconfig restConfig, err := 
clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig)) @@ -153,7 +159,9 @@ func (c *ClusterManager) UpdateCredential( clusterObj, err := clusterinstall.ConvertKubeconfigToCluster( name, displayName, + clusterMode, description, + clusterLevel, restConfig, ) if err != nil { @@ -466,3 +474,22 @@ func (c *ClusterManager) ValidateKubeConfigFor( return info.String(), nil } } + +// GetAgentYamlForCluster returns the agent yaml byte for a given cluster +func (c *ClusterManager) GetAgentYamlForCluster( + ctx context.Context, + client *multicluster.MultiClusterClient, + name string, +) ([]byte, error) { + secret, err := client.ClientSet.CoreV1().Secrets("karpor").Get(ctx, fmt.Sprintf("%s-agent", name), metav1.GetOptions{}) + if err != nil && !errors.IsNotFound(err) { + return nil, err + } + if errors.IsNotFound(err) { + return nil, nil + } + if secret == nil || secret.Data == nil { + return nil, errors.NewNotFound(corev1.Resource("secrets"), name) + } + return secret.Data["config"], nil +} diff --git a/pkg/core/manager/cluster/manager_test.go b/pkg/core/manager/cluster/manager_test.go index 3e8f5682..b97d60c3 100644 --- a/pkg/core/manager/cluster/manager_test.go +++ b/pkg/core/manager/cluster/manager_test.go @@ -20,8 +20,6 @@ import ( "fmt" "testing" - "github.com/KusionStack/karpor/pkg/infra/multicluster" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/errors" @@ -29,6 +27,9 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" k8syaml "sigs.k8s.io/yaml" + + "github.com/KusionStack/karpor/pkg/infra/multicluster" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" ) // TestGetCluster tests the GetCluster method of the ClusterManager for various @@ -132,6 +133,8 @@ func TestCreateCluster(t *testing.T) { clusterName string displayName string description string + clusterMode 
string + clusterLevel int kubeConfig string expectError bool expectedErrorMessage string @@ -172,7 +175,9 @@ func TestCreateCluster(t *testing.T) { tc.clusterName, tc.displayName, tc.description, + tc.clusterMode, tc.kubeConfig, + tc.clusterLevel, ) if tc.expectError { @@ -204,6 +209,8 @@ func TestUpdateMetadata(t *testing.T) { clusterName string displayName string description string + clusterMode string + clusterLevel int expectError bool expectedError string }{ @@ -484,6 +491,8 @@ spec: privateKey: M2I5NioqKioqKioqKioqKioqKioqKioqKioqKjY1MzY= description: mock-description displayName: Existing Cluster + level: 2 + mode: pull `, }, { @@ -731,6 +740,8 @@ func newMockCluster(name string) *unstructured.Unstructured { "caBundle": "sensitive-ca-bundle", }, }, + "mode": "pull", + "level": 2, } // Set annotations on the object diff --git a/pkg/core/manager/cluster/types.go b/pkg/core/manager/cluster/types.go index 55d2b973..e633d629 100644 --- a/pkg/core/manager/cluster/types.go +++ b/pkg/core/manager/cluster/types.go @@ -104,3 +104,7 @@ type User struct { Username string `yaml:"username,omitempty"` Password string `yaml:"password,omitempty"` } + +type AgentYml struct { + AgentYml string `json:"agentYml"` +} diff --git a/pkg/core/manager/insight/events_test.go b/pkg/core/manager/insight/events_test.go index f1934d72..f6e85afa 100644 --- a/pkg/core/manager/insight/events_test.go +++ b/pkg/core/manager/insight/events_test.go @@ -18,11 +18,12 @@ import ( "context" "testing" - "github.com/KusionStack/karpor/pkg/core/entity" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/client-go/dynamic" + + "github.com/KusionStack/karpor/pkg/core/entity" ) func TestInsightManager_GetResourceEvents(t *testing.T) { diff --git a/pkg/core/manager/insight/resource_test.go b/pkg/core/manager/insight/resource_test.go index c8ef0aa2..93e6bbcb 100644 --- a/pkg/core/manager/insight/resource_test.go +++ 
b/pkg/core/manager/insight/resource_test.go @@ -19,12 +19,13 @@ import ( "reflect" "testing" - "github.com/KusionStack/karpor/pkg/core/entity" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/client-go/dynamic" + + "github.com/KusionStack/karpor/pkg/core/entity" ) // TestGetResource tests the TestGetResource method of the InsightManager for diff --git a/pkg/core/manager/insight/summary_test.go b/pkg/core/manager/insight/summary_test.go index 1cba9db5..36446bd3 100644 --- a/pkg/core/manager/insight/summary_test.go +++ b/pkg/core/manager/insight/summary_test.go @@ -18,7 +18,6 @@ import ( "context" "testing" - "github.com/KusionStack/karpor/pkg/core/entity" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" @@ -27,6 +26,8 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + + "github.com/KusionStack/karpor/pkg/core/entity" ) func TestInsightManager_GetResourceSummary(t *testing.T) { diff --git a/pkg/core/manager/insight/test_helper.go b/pkg/core/manager/insight/test_helper.go index 9173cf89..c53347cb 100644 --- a/pkg/core/manager/insight/test_helper.go +++ b/pkg/core/manager/insight/test_helper.go @@ -17,9 +17,6 @@ package insight import ( "context" - "github.com/KusionStack/karpor/pkg/core/entity" - "github.com/KusionStack/karpor/pkg/infra/multicluster" - "github.com/KusionStack/karpor/pkg/infra/search/storage" coreV1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -30,6 +27,10 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/KusionStack/karpor/pkg/core/entity" + "github.com/KusionStack/karpor/pkg/infra/multicluster" + "github.com/KusionStack/karpor/pkg/infra/search/storage" ) // mockMultiClusterClient returns 
a mock MultiClusterClient for testing purposes. diff --git a/pkg/core/manager/insight/topology_test.go b/pkg/core/manager/insight/topology_test.go index 894534a4..24a52bf2 100644 --- a/pkg/core/manager/insight/topology_test.go +++ b/pkg/core/manager/insight/topology_test.go @@ -19,12 +19,13 @@ import ( "reflect" "testing" - "github.com/KusionStack/karpor/pkg/core/entity" - "github.com/KusionStack/karpor/pkg/infra/topology" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/client-go/dynamic" + + "github.com/KusionStack/karpor/pkg/core/entity" + "github.com/KusionStack/karpor/pkg/infra/topology" ) func TestInsightManager_GetTopologyForCluster(t *testing.T) { diff --git a/pkg/core/route/route.go b/pkg/core/route/route.go index c3cded43..691cc7c0 100644 --- a/pkg/core/route/route.go +++ b/pkg/core/route/route.go @@ -20,6 +20,12 @@ import ( "expvar" "net/http" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + httpswagger "github.com/swaggo/http-swagger/v2" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/klog/v2" + docs "github.com/KusionStack/karpor/api/openapispec" aggregatorhandler "github.com/KusionStack/karpor/pkg/core/handler/aggregator" authnhandler "github.com/KusionStack/karpor/pkg/core/handler/authn" @@ -44,11 +50,6 @@ import ( "github.com/KusionStack/karpor/pkg/infra/search/storage" "github.com/KusionStack/karpor/pkg/kubernetes/registry" "github.com/KusionStack/karpor/pkg/kubernetes/registry/search" - "github.com/go-chi/chi/v5" - "github.com/go-chi/chi/v5/middleware" - httpswagger "github.com/swaggo/http-swagger/v2" - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/klog/v2" ) // NewCoreRoute creates and configures an instance of chi.Mux with the given @@ -156,6 +157,7 @@ func setupRestAPIV1( r.Post("/", clusterhandler.Create(clusterMgr, genericConfig)) r.Put("/", clusterhandler.Update(clusterMgr, genericConfig)) r.Delete("/", 
clusterhandler.Delete(clusterMgr, genericConfig)) + r.Get("/agentYml", clusterhandler.GetAgentYml(clusterMgr, genericConfig)) }) r.Post("/config/file", clusterhandler.UploadKubeConfig(clusterMgr)) r.Post("/config/validate", clusterhandler.ValidateKubeConfig(clusterMgr)) diff --git a/pkg/infra/multicluster/multicluster.go b/pkg/infra/multicluster/multicluster.go index 225052e0..3f976001 100644 --- a/pkg/infra/multicluster/multicluster.go +++ b/pkg/infra/multicluster/multicluster.go @@ -19,8 +19,6 @@ import ( "net" "net/url" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" - "github.com/KusionStack/karpor/pkg/kubernetes/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -30,6 +28,9 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" metrics "k8s.io/metrics/pkg/client/clientset/versioned" metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" ) // MultiClusterClient represents the client used to interact with multiple Kubernetes clusters. 
diff --git a/pkg/infra/search/storage/elasticsearch/resource.go b/pkg/infra/search/storage/elasticsearch/resource.go index bcc205ad..dc3546a8 100644 --- a/pkg/infra/search/storage/elasticsearch/resource.go +++ b/pkg/infra/search/storage/elasticsearch/resource.go @@ -21,11 +21,12 @@ import ( "fmt" "time" - "github.com/KusionStack/karpor/pkg/infra/search/storage" "github.com/elliotxx/esquery" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" ) const ( @@ -124,7 +125,7 @@ func (s *Storage) GetResource(ctx context.Context, cluster string, obj runtime.O } if len(resp.Hits.Hits) == 0 { - return fmt.Errorf("no resource found for cluster: %s, namespace: %s, name: %s", cluster, unObj.GetNamespace(), unObj.GetName()) + return ErrNotFound } res, err := storage.Map2Resource(resp.Hits.Hits[0].Source) diff --git a/pkg/infra/search/storage/types.go b/pkg/infra/search/storage/types.go index 4b444d19..5408871b 100644 --- a/pkg/infra/search/storage/types.go +++ b/pkg/infra/search/storage/types.go @@ -76,6 +76,7 @@ type SearchStorage interface { type SearchStorageGetter interface { GetSearchStorage() (SearchStorage, error) } + type ResourceStorageGetter interface { GetResourceStorage() (ResourceStorage, error) } diff --git a/pkg/infra/topology/children.go b/pkg/infra/topology/children.go index 4cdaab7c..001df724 100644 --- a/pkg/infra/topology/children.go +++ b/pkg/infra/topology/children.go @@ -20,14 +20,15 @@ import ( "context" "errors" - "github.com/KusionStack/karpor/pkg/util/ctxutil" - topologyutil "github.com/KusionStack/karpor/pkg/util/topology" "github.com/dominikbraun/graph" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + + "github.com/KusionStack/karpor/pkg/util/ctxutil" + 
topologyutil "github.com/KusionStack/karpor/pkg/util/topology" ) // GetChildResourcesList returns an *unstructured.UnstructuredList representing all resources that matches the child GVK in the current namespace diff --git a/pkg/infra/topology/relationship.go b/pkg/infra/topology/relationship.go index a0267fc8..13808855 100644 --- a/pkg/infra/topology/relationship.go +++ b/pkg/infra/topology/relationship.go @@ -24,12 +24,6 @@ import ( "github.com/pkg/errors" - "github.com/KusionStack/karpor/config" - "github.com/KusionStack/karpor/pkg/core/entity" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" - "github.com/KusionStack/karpor/pkg/util/ctxutil" - topologyutil "github.com/KusionStack/karpor/pkg/util/topology" "github.com/dominikbraun/graph" yaml "gopkg.in/yaml.v3" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -39,6 +33,13 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/klog/v2" + + "github.com/KusionStack/karpor/config" + "github.com/KusionStack/karpor/pkg/core/entity" + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + "github.com/KusionStack/karpor/pkg/util/ctxutil" + topologyutil "github.com/KusionStack/karpor/pkg/util/topology" ) // GetHash method returns the hash of the resource graph node. 
diff --git a/pkg/kubernetes/apis/cluster/types.go b/pkg/kubernetes/apis/cluster/types.go index 3eb3b21e..05ec2461 100644 --- a/pkg/kubernetes/apis/cluster/types.go +++ b/pkg/kubernetes/apis/cluster/types.go @@ -28,6 +28,14 @@ const ( CredentialTypeOIDC CredentialType = "OIDC" ) +const ( + PullClusterMode = "pull" + PushClusterMode = "push" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:subresource:status // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -58,6 +66,9 @@ type ClusterSpec struct { Description string `json:"description,omitempty"` DisplayName string `json:"displayName,omitempty"` Finalized *bool `json:"finalized,omitempty"` + Mode string `json:"mode,omitempty"` + // cluster scale level, optional value 1, 2, 3, default 1 + Level int `json:"level"` } type ClusterStatus struct { diff --git a/pkg/kubernetes/apis/cluster/v1beta1/types.go b/pkg/kubernetes/apis/cluster/v1beta1/types.go index 4a80096a..b5f5a3b9 100644 --- a/pkg/kubernetes/apis/cluster/v1beta1/types.go +++ b/pkg/kubernetes/apis/cluster/v1beta1/types.go @@ -28,6 +28,18 @@ const ( CredentialTypeOIDC CredentialType = "OIDC" ) +const ( + PullClusterMode = "pull" + PushClusterMode = "push" +) + +const ( + ClusterFinalizer = "finalizers.cluster.karpor.io" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:subresource:status // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -59,6 +71,9 @@ type ClusterSpec struct { Description string `json:"description"` DisplayName string `json:"displayName"` Finalized *bool `json:"finalized,omitempty"` + Mode string `json:"mode,omitempty"` + // cluster scale level, optional value 1, 2, 3, default 1 + Level int `json:"level"` } type ClusterStatus struct { diff --git a/pkg/kubernetes/apis/cluster/v1beta1/zz_generated.conversion.go 
b/pkg/kubernetes/apis/cluster/v1beta1/zz_generated.conversion.go index 194e8c33..339cfc80 100644 --- a/pkg/kubernetes/apis/cluster/v1beta1/zz_generated.conversion.go +++ b/pkg/kubernetes/apis/cluster/v1beta1/zz_generated.conversion.go @@ -297,6 +297,8 @@ func autoConvert_v1beta1_ClusterSpec_To_cluster_ClusterSpec(in *ClusterSpec, out out.Description = in.Description out.DisplayName = in.DisplayName out.Finalized = (*bool)(unsafe.Pointer(in.Finalized)) + out.Mode = in.Mode + out.Level = in.Level return nil } @@ -313,6 +315,8 @@ func autoConvert_cluster_ClusterSpec_To_v1beta1_ClusterSpec(in *cluster.ClusterS out.Description = in.Description out.DisplayName = in.DisplayName out.Finalized = (*bool)(unsafe.Pointer(in.Finalized)) + out.Mode = in.Mode + out.Level = in.Level return nil } diff --git a/pkg/kubernetes/apis/search/v1beta1/types.go b/pkg/kubernetes/apis/search/v1beta1/types.go index 6c722392..229391d3 100644 --- a/pkg/kubernetes/apis/search/v1beta1/types.go +++ b/pkg/kubernetes/apis/search/v1beta1/types.go @@ -20,6 +20,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:subresource:status // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -62,6 +65,8 @@ type SyncRegistryList struct { Items []SyncRegistry `json:"items"` } +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -137,6 +142,8 @@ type ResourceSyncRule struct { RemainAfterDeleted bool `json:"remainAfterDeleted,omitempty"` } +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -175,6 +182,8 @@ type TrimRuleList struct { Items []TrimRule `json:"items"` } +// 
+kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/kubernetes/generated/informers/externalversions/cluster/v1beta1/cluster.go b/pkg/kubernetes/generated/informers/externalversions/cluster/v1beta1/cluster.go index 456fb061..21689c93 100644 --- a/pkg/kubernetes/generated/informers/externalversions/cluster/v1beta1/cluster.go +++ b/pkg/kubernetes/generated/informers/externalversions/cluster/v1beta1/cluster.go @@ -76,17 +76,14 @@ func NewFilteredClusterInformer(client versioned.Interface, resyncPeriod time.Du ) } -// defaultInformer provides a default implementation for the cluster informer using the given client and resync period. func (f *clusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewFilteredClusterInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -// Informer returns the SharedIndexInformer for the cluster informer. func (f *clusterInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&clusterv1beta1.Cluster{}, f.defaultInformer) } -// Lister returns the ClusterLister for the cluster informer. func (f *clusterInformer) Lister() v1beta1.ClusterLister { return v1beta1.NewClusterLister(f.Informer().GetIndexer()) } diff --git a/pkg/kubernetes/generated/informers/externalversions/factory.go b/pkg/kubernetes/generated/informers/externalversions/factory.go index 90cb7bd5..116160dd 100644 --- a/pkg/kubernetes/generated/informers/externalversions/factory.go +++ b/pkg/kubernetes/generated/informers/externalversions/factory.go @@ -113,7 +113,6 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start starts all informers in the factory and waits for the stop channel to close before exiting. 
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() @@ -138,7 +137,6 @@ func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { } } -// Shutdown stops all running informers in the factory. func (f *sharedInformerFactory) Shutdown() { f.lock.Lock() f.shuttingDown = true @@ -148,7 +146,6 @@ func (f *sharedInformerFactory) Shutdown() { f.wg.Wait() } -// WaitForCacheSync waits for all started informers to sync with the cluster state. func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -251,12 +248,10 @@ type SharedInformerFactory interface { Search() search.Interface } -// Cluster returns the cluster informer. func (f *sharedInformerFactory) Cluster() cluster.Interface { return cluster.New(f, f.namespace, f.tweakListOptions) } -// Search returns the search informer. func (f *sharedInformerFactory) Search() search.Interface { return search.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncregistry.go b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncregistry.go index a01e732c..2f1edc17 100644 --- a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncregistry.go +++ b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncregistry.go @@ -76,17 +76,14 @@ func NewFilteredSyncRegistryInformer(client versioned.Interface, resyncPeriod ti ) } -// defaultInformer provides the default implementation for the shared informer used by the SyncRegistryInformer. 
func (f *syncRegistryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewFilteredSyncRegistryInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -// Informer returns the shared informer for the SyncRegistryInformer. func (f *syncRegistryInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&searchv1beta1.SyncRegistry{}, f.defaultInformer) } -// Lister returns the lister for the SyncRegistryInformer. func (f *syncRegistryInformer) Lister() v1beta1.SyncRegistryLister { return v1beta1.NewSyncRegistryLister(f.Informer().GetIndexer()) } diff --git a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncresources.go b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncresources.go index 335d512f..5c386813 100644 --- a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncresources.go +++ b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/syncresources.go @@ -76,17 +76,14 @@ func NewFilteredSyncResourcesInformer(client versioned.Interface, resyncPeriod t ) } -// defaultInformer provides a default SharedIndexInformer implementation for SyncResources resources. func (f *syncResourcesInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewFilteredSyncResourcesInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -// Informer returns the SharedIndexInformer for SyncResources resources. func (f *syncResourcesInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&searchv1beta1.SyncResources{}, f.defaultInformer) } -// Lister returns the SyncResourcesLister for SyncResources resources. 
func (f *syncResourcesInformer) Lister() v1beta1.SyncResourcesLister { return v1beta1.NewSyncResourcesLister(f.Informer().GetIndexer()) } diff --git a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/transformrule.go b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/transformrule.go index 509aebd2..443efd2c 100644 --- a/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/transformrule.go +++ b/pkg/kubernetes/generated/informers/externalversions/search/v1beta1/transformrule.go @@ -76,17 +76,14 @@ func NewFilteredTransformRuleInformer(client versioned.Interface, resyncPeriod t ) } -// defaultInformer provides a default implementation for the SharedIndexInformer interface. func (f *transformRuleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewFilteredTransformRuleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -// Informer returns the SharedIndexInformer for TransformRule resources. func (f *transformRuleInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&searchv1beta1.TransformRule{}, f.defaultInformer) } -// Lister returns the TransformRuleLister for TransformRule resources. 
func (f *transformRuleInformer) Lister() v1beta1.TransformRuleLister { return v1beta1.NewTransformRuleLister(f.Informer().GetIndexer()) } diff --git a/pkg/kubernetes/generated/openapi/zz_generated.openapi.go b/pkg/kubernetes/generated/openapi/zz_generated.openapi.go index 116b9869..524bd5f9 100644 --- a/pkg/kubernetes/generated/openapi/zz_generated.openapi.go +++ b/pkg/kubernetes/generated/openapi/zz_generated.openapi.go @@ -361,8 +361,22 @@ func schema_kubernetes_apis_cluster_v1beta1_ClusterSpec(ref common.ReferenceCall Format: "", }, }, + "mode": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "level": { + SchemaProps: spec.SchemaProps{ + Description: "cluster scale level, optional value 1, 2, 3, default 1", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, }, - Required: []string{"provider", "access", "displayName"}, + Required: []string{"provider", "access", "displayName", "level"}, }, }, Dependencies: []string{ diff --git a/pkg/kubernetes/registry/search/storage_provider.go b/pkg/kubernetes/registry/search/storage_provider.go index aed948cd..6c5e6cbf 100644 --- a/pkg/kubernetes/registry/search/storage_provider.go +++ b/pkg/kubernetes/registry/search/storage_provider.go @@ -19,6 +19,11 @@ package search import ( "fmt" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" + genericapiserver "k8s.io/apiserver/pkg/server" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage" "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" "github.com/KusionStack/karpor/pkg/kubernetes/apis/search" @@ -27,10 +32,6 @@ import ( "github.com/KusionStack/karpor/pkg/kubernetes/registry/search/transformrule" "github.com/KusionStack/karpor/pkg/kubernetes/registry/search/trimrule" "github.com/KusionStack/karpor/pkg/kubernetes/scheme" - "k8s.io/apiserver/pkg/registry/generic" - "k8s.io/apiserver/pkg/registry/rest" - 
genericapiserver "k8s.io/apiserver/pkg/server" - serverstorage "k8s.io/apiserver/pkg/server/storage" ) const ( diff --git a/pkg/server/server.go b/pkg/server/server.go index 2b09b964..00acd974 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -22,6 +22,13 @@ import ( "io/fs" "net/http" + "github.com/go-chi/chi/v5" + "k8s.io/apiserver/pkg/registry/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "k8s.io/klog/v2" + rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" + "github.com/KusionStack/karpor/pkg/core/route" "github.com/KusionStack/karpor/pkg/kubernetes/registry" clusterstorage "github.com/KusionStack/karpor/pkg/kubernetes/registry/cluster" @@ -29,12 +36,6 @@ import ( searchstorage "github.com/KusionStack/karpor/pkg/kubernetes/registry/search" "github.com/KusionStack/karpor/pkg/kubernetes/scheme" "github.com/KusionStack/karpor/ui" - "github.com/go-chi/chi/v5" - "k8s.io/apiserver/pkg/registry/generic" - genericapiserver "k8s.io/apiserver/pkg/server" - serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/klog/v2" - rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" ) // KarporServer is the carrier of the main process of Karpor. diff --git a/pkg/syncer/agent.go b/pkg/syncer/agent.go new file mode 100644 index 00000000..dbaf10d4 --- /dev/null +++ b/pkg/syncer/agent.go @@ -0,0 +1,210 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package syncer + +import ( + "context" + "fmt" + "os" + "os/user" + "path" + "path/filepath" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/syncer/utils" +) + +type filterForSdkFunc func(context.Context, schema.GroupVersionResource, *searchv1beta1.ResourceSyncRule) bool + +type AgentReconciler struct { + SyncReconciler + gvrToGVKCache sync.Map + discoveryClient discovery.DiscoveryInterface + clusterName string +} + +// NewAgentReconciler creates a new instance of the AgentReconciler structure with the given storage. +func NewAgentReconciler(storage storage.ResourceStorage, clusterName string) *AgentReconciler { + return &AgentReconciler{ + SyncReconciler: SyncReconciler{ + storage: storage, + }, + + clusterName: clusterName, + } +} + +// SetupWithManager sets up the AgentReconciler with the given manager and registers it as a controller. Different from the SyncReconcile, it only focus on the special cluster. 
+func (r *AgentReconciler) SetupWithManager(mgr ctrl.Manager) error { + // only focus on the special cluster + clusterFilter := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return e.Object.GetName() == r.clusterName + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectNew.GetName() == r.clusterName + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return e.Object.GetName() == r.clusterName + }, + GenericFunc: func(e event.GenericEvent) bool { + return e.Object.GetName() == r.clusterName + }, + } + + controller, err := ctrl.NewControllerManagedBy(mgr). + For(&clusterv1beta1.Cluster{}, builder.WithPredicates(clusterFilter)). + Watches(&source.Kind{Type: &searchv1beta1.SyncRegistry{}}, &handler.Funcs{ + CreateFunc: r.CreateEvent, + UpdateFunc: r.UpdateEvent, + DeleteFunc: r.DeleteEvent, + }). + Build(r) + if err != nil { + return err + } + r.client = mgr.GetClient() + r.controller = controller + r.discoveryClient = discovery.NewDiscoveryClientForConfigOrDie(mgr.GetConfig()) + // TODO: + r.mgr = NewMultiClusterSyncManager(context.Background(), r.controller, r.storage) + return nil +} + +// Reconcile is the main entry point for the syncer reconciler, which is called whenever there is a change in the watched resources. 
+func (r *AgentReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + logger := ctrl.LoggerFrom(ctx) + + var cluster clusterv1beta1.Cluster + if err := r.client.Get(ctx, req.NamespacedName, &cluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("cluster doesn't exist", "cluster", req.Name) + return reconcile.Result{}, r.stopCluster(ctx, req.Name) + } + return reconcile.Result{}, err + } + + if !cluster.DeletionTimestamp.IsZero() { + logger.Info("cluster is being deleted", "cluster", cluster.Name) + + err := r.stopCluster(ctx, cluster.Name) + if err != nil { + return reconcile.Result{}, err + } + + if len(cluster.Finalizers) > 0 { + cluster.Finalizers = nil + err := r.client.Update(ctx, &cluster) + if err != nil && !apierrors.IsNotFound(err) { + return reconcile.Result{}, err + } + } + + return reconcile.Result{}, nil + } + + return reconcile.Result{}, r.handleClusterAddOrUpdate(ctx, cluster.DeepCopy(), buildClusterConfigInAgent, r.filterForSdk) +} + +func (r *AgentReconciler) filterForSdk(ctx context.Context, gvr schema.GroupVersionResource, rule *searchv1beta1.ResourceSyncRule) bool { + gvk, err := r.gvrToGVK(ctx, gvr) + if err != nil { + return false + } + if _, exist := utils.GetSyncGVK(gvk); exist { + // If gvk map consist special gvk, it means that gvk is already reconciled by dynamic reconciler + utils.SetSyncGVK(gvk, *rule) + return false + } + return true +} + +func (r *AgentReconciler) gvrToGVK(ctx context.Context, gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if val, ok := r.gvrToGVKCache.Load(gvr); ok { + return val.(schema.GroupVersionKind), nil + } + + zero := schema.GroupVersionKind{} + + groupResources, err := restmapper.GetAPIGroupResources(r.discoveryClient) + if err != nil { + return zero, err + } + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + gvk, err := mapper.KindFor(gvr) + if err != nil { + return zero, err + } + + ctrl.LoggerFrom(ctx).Info("GVR 
to GVK", "gvr", gvr, "gvk", gvk) + r.gvrToGVKCache.Store(gvr, gvk) + return gvk, nil +} + +func buildClusterConfigInAgent(cluster *clusterv1beta1.Cluster) (*rest.Config, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{ + WarnIfAllMissing: false, + Precedence: []string{clientcmd.RecommendedHomeFile}, + MigrationRules: map[string]string{ + clientcmd.RecommendedHomeFile: filepath.Join(os.Getenv("HOME"), clientcmd.RecommendedHomeDir, ".kubeconfig"), + }, + } + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %v", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, path.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) + } + cfg, err := loadConfigWithContext("", loadingRules, "") + if err != nil { + return nil, err + } + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + return cfg, nil +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} diff --git a/pkg/syncer/agent_test.go b/pkg/syncer/agent_test.go new file mode 100644 index 00000000..35822a57 --- /dev/null +++ b/pkg/syncer/agent_test.go @@ -0,0 +1,212 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//nolint:dupl +package syncer + +import ( + "context" + "testing" + + "github.com/bytedance/mockey" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/restmapper" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" +) + +func TestAgentReconciler_Reconcile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1beta1.Cluster + req reconcile.Request + wantErr bool + }{ + { + "test no error", + &clusterv1beta1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + reconcile.Request{NamespacedName: types.NamespacedName{Name: "cluster1"}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &AgentReconciler{ + SyncReconciler: SyncReconciler{ + client: fake.NewClientBuilder().WithRuntimeObjects(tt.cluster).WithScheme(scheme.Scheme).Build(), + }, + } + m := mockey.Mock((*SyncReconciler).handleClusterAddOrUpdate).Return(nil).Build() + defer m.UnPatch() + _, err := r.Reconcile(context.TODO(), tt.req) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestNewAgentReconciler(t *testing.T) { + tests := []struct { + name string + storage storage.ResourceStorage + clusterName string + }{ + { + "test nil", + nil, + "example-cluster", + }, + { + "test not nil", + &elasticsearch.Storage{}, + 
"example-cluster2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := NewAgentReconciler(tt.storage, tt.clusterName) + require.Equal(t, got.storage, tt.storage) + require.Equal(t, got.clusterName, tt.clusterName) + }) + } +} + +func TestAgentReconciler_getResources(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1beta1.Cluster + registries []searchv1beta1.SyncRegistry + resources map[schema.GroupVersionResource]*searchv1beta1.ResourceSyncRule + allResources []*searchv1beta1.ResourceSyncRule + validResources []*searchv1beta1.ResourceSyncRule + wantErr bool + }{ + { + "test no error", + &clusterv1beta1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + []searchv1beta1.SyncRegistry{ + { + ObjectMeta: metav1.ObjectMeta{Name: "example-registry1"}, + Spec: searchv1beta1.SyncRegistrySpec{ + Clusters: []string{"cluster1"}, + }, + }, + }, + map[schema.GroupVersionResource]*searchv1beta1.ResourceSyncRule{ + v1.SchemeGroupVersion.WithResource("pods"): { + APIVersion: "v1", + Resource: "pods", + }, + }, + []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "v1", + Resource: "pods", + }, + }, + []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "v1", + Resource: "pods", + }, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &AgentReconciler{ + SyncReconciler: SyncReconciler{ + client: fake.NewClientBuilder().WithRuntimeObjects(tt.cluster).WithScheme(scheme.Scheme).Build(), + }, + } + r.gvrToGVKCache.Store(v1.SchemeGroupVersion.WithResource("pods"), schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + + mockey.Mock((*SyncReconciler).getRegistries).Return(tt.registries, nil).Build() + mockey.Mock((*SyncReconciler).getNormalizedResources).Return(tt.resources, nil, nil).Build() + defer mockey.UnPatchAll() + allResources, validResources, _, err := r.getResources(context.TODO(), tt.cluster, nil) + require.Equal(t, allResources, tt.allResources) + 
require.Equal(t, validResources, tt.validResources) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestAgentReconciler_gvrToGVK(t *testing.T) { + tests := []struct { + name string + gvk schema.GroupVersionKind + gvr schema.GroupVersionResource + wantErr bool + }{ + { + name: "test gvrToGVK", + gvk: v1.SchemeGroupVersion.WithKind("Pod"), + gvr: v1.SchemeGroupVersion.WithResource("pods"), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &AgentReconciler{} + + mockey.Mock(restmapper.GetAPIGroupResources).Return([]*restmapper.APIGroupResources{ + { + Group: metav1.APIGroup{ + Name: "", + Versions: []metav1.GroupVersionForDiscovery{ + {GroupVersion: "v1", Version: "v1"}, + }, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + { + Name: "pods", + Kind: "Pod", + Version: "v1", + Group: "", + }, + }, + }, + }, + }, nil).Build() + + got, err := r.gvrToGVK(context.Background(), tt.gvr) + require.Equal(t, tt.gvk, got) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/syncer/dynamic_reconciler.go b/pkg/syncer/dynamic_reconciler.go new file mode 100644 index 00000000..c03dcf3d --- /dev/null +++ b/pkg/syncer/dynamic_reconciler.go @@ -0,0 +1,232 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package syncer + +import ( + "context" + "reflect" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + clientgocache "k8s.io/client-go/tools/cache" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/syncer/transform" + "github.com/KusionStack/karpor/pkg/syncer/utils" +) + +type DynamicReconciler struct { + ctx context.Context + gvk schema.GroupVersionKind + clusterName string + logger logr.Logger + storage storage.ResourceStorage + + syncRule v1beta1.ResourceSyncRule + transformFunc clientgocache.TransformFunc + trimFunc clientgocache.TransformFunc + client client.Client + + scheme *runtime.Scheme +} + +func NewDynamicReconciler(ctx context.Context, clusterName string, gvk schema.GroupVersionKind, storage storage.ResourceStorage) *DynamicReconciler { + return &DynamicReconciler{ + ctx: ctx, + clusterName: clusterName, + gvk: gvk, + logger: ctrl.Log.WithName(gvk.String()), + storage: storage, + syncRule: utils.ZeroVal, + } +} + +func (r *DynamicReconciler) SetupWithManager(mgr manager.Manager) error { + // step 1: set client for DynamicReconciler + r.client = mgr.GetClient() + r.scheme = mgr.GetScheme() + + // step 2: set manager + c, err := controller.New(r.gvk.String(), mgr, controller.Options{ + Reconciler: r, + MaxConcurrentReconciles: 10, + }) + if err != nil { 
+ return err + } + + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(r.gvk) + err = c.Watch(&source.Kind{Type: obj}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + // step 3: register transform func for template pkg + if err := transform.RegisterClusterTmplFunc(r.clusterName, "objectRef", r.getObject); err != nil { + r.logger.Error(err, "error in registering tmpl func") + } + + return nil +} + +func (r *DynamicReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, retErr error) { + logger := r.logger + logger.WithValues("kind", r.gvk).WithValues("name", req.NamespacedName) + + defer func() { + if retErr != nil { + logger.Error(retErr, "reconciliation error") + } + if err := recover(); err != nil { + logger.Error(errors.New("expected no panic; recovered"), "") + } + }() + + if reflect.DeepEqual(r.syncRule, utils.ZeroVal) { + rule, exist := utils.GetSyncGVK(r.gvk) + if exist && !reflect.DeepEqual(rule, utils.ZeroVal) { + r.syncRule = rule + + if rule.Transform != nil { + transformFunc, err := parseTransformer(rule.Transform, r.clusterName) + if err != nil { + return reconcile.Result{}, err + } + r.transformFunc = transformFunc + } + + if rule.Trim != nil { + trimFunc, err := parseTrimer(rule.Trim) + if err != nil { + return reconcile.Result{}, err + } + r.trimFunc = trimFunc + } + + logger.Info("get rule now, start to sync.") + } else { + logger.Info("wait rule ready, restart in 20 seconds...") + return reconcile.Result{RequeueAfter: 20 * time.Second}, nil + } + } + + /* + If using protobuf type to receive unstructured Object in the 1.12 Kubernetes, it will decode failed. + To be compatible with old version kubernetes, it uses object.Object rather than unstructured Object. 
+ */ + runtimeObj, err := r.scheme.New(r.gvk) + if err != nil { + runtimeObj = &unstructured.Unstructured{} + } + obj, err := runtimeObjectToObject(runtimeObj) + if err != nil { + return reconcile.Result{}, err + } + + err = r.client.Get(ctx, req.NamespacedName, obj) + if err != nil { + if apierrors.IsNotFound(err) { + obj := genUnObj(r.syncRule, req.String()) + err = r.storage.DeleteResource(ctx, r.clusterName, obj) + if errors.Is(err, elasticsearch.ErrNotFound) { + logger.Error(err, "failed to delete resource", "kind", r.gvk.String(), "name", obj.GetName(), "namespace", obj.GetNamespace()) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + return reconcile.Result{}, err + } + + unstructuredObj, err := objectToUnstructured(obj) + if err != nil { + return reconcile.Result{}, err + } + // trimer + if r.trimFunc != nil { + val, err := r.trimFunc(unstructuredObj) + if err != nil { + return reconcile.Result{}, err + } + unstructuredObj = val.(*unstructured.Unstructured) + } + // transformer + if r.transformFunc != nil { + val, err := r.transformFunc(unstructuredObj) + if err != nil { + return reconcile.Result{}, err + } + unstructuredObj = val.(*unstructured.Unstructured) + } + err = r.storage.SaveResource(ctx, r.clusterName, unstructuredObj) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to save resource") + } + + return reconcile.Result{}, nil +} + +func (r *DynamicReconciler) getObject(apiVersion, kind, namespace, name string) (interface{}, error) { + gv, err := schema.ParseGroupVersion(apiVersion) + if err != nil { + return nil, err + } + + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: kind, + }) + err = r.client.Get(context.Background(), client.ObjectKey{ + Namespace: namespace, + Name: name, + }, obj) + if err != nil { + return nil, err + } + return obj, nil +} + +func objectToUnstructured(obj interface{}) 
(*unstructured.Unstructured, error) { + unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap} + return unstructuredObj, nil +} + +func runtimeObjectToObject(runtimeObj interface{}) (client.Object, error) { + obj, ok := runtimeObj.(client.Object) + if !ok { + return nil, errors.New("error in convert runtimeObj") + } + return obj, nil +} diff --git a/pkg/syncer/dynamic_reconciler_test.go b/pkg/syncer/dynamic_reconciler_test.go new file mode 100644 index 00000000..edbb63a2 --- /dev/null +++ b/pkg/syncer/dynamic_reconciler_test.go @@ -0,0 +1,74 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package syncer + +import ( + "context" + "testing" + + "github.com/bytedance/mockey" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" + "github.com/KusionStack/karpor/pkg/syncer/utils" +) + +func TestDynamicReconciler_Reconcile(t *testing.T) { + tests := []struct { + name string + gvk schema.GroupVersionKind + pod *v1.Pod + req reconcile.Request + wantErr bool + }{ + { + "test no error with pods", + schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, + &v1.Pod{}, + reconcile.Request{NamespacedName: types.NamespacedName{Name: "cluster1"}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := NewDynamicReconciler(context.Background(), "cluster-name", tt.gvk, &elasticsearch.Storage{}) + r.client = fake.NewClientBuilder().WithRuntimeObjects(tt.pod).WithScheme(scheme.Scheme).Build() + r.scheme = scheme.Scheme + utils.SetSyncGVK(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, v1beta1.ResourceSyncRule{ + Namespace: "karpor", + Transform: &v1beta1.TransformRuleSpec{ + Type: "patch", + }, + Trim: &v1beta1.TrimRuleSpec{}, + }) + + m := mockey.Mock((*elasticsearch.Storage).SaveResource).Return(nil).Build() + defer m.UnPatch() + _, err := r.Reconcile(context.TODO(), tt.req) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/syncer/parser.go b/pkg/syncer/parser.go new file mode 100644 index 00000000..6f771de0 --- /dev/null +++ b/pkg/syncer/parser.go @@ -0,0 +1,137 @@ +// Copyright The Karpor Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package syncer
+
+import (
+	"bytes"
+	"fmt"
+	"text/template"
+
+	sprig "github.com/Masterminds/sprig/v3"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	clientgocache "k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+
+	"github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1"
+	"github.com/KusionStack/karpor/pkg/syncer/jsonextracter"
+	"github.com/KusionStack/karpor/pkg/syncer/transform"
+	"github.com/KusionStack/karpor/pkg/util/jsonpath"
+)
+
+// parseTrimer creates and returns a trim function for the informerSource based on the provided trims.
+func parseTrimer(t *v1beta1.TrimRuleSpec) (clientgocache.TransformFunc, error) {
+	if t == nil || len(t.Retain.JSONPaths) == 0 {
+		//nolint:nilnil,nolintlint
+		return nil, nil
+	}
+
+	extracters := make([]jsonextracter.Extracter, 0, len(t.Retain.JSONPaths))
+	for _, p := range t.Retain.JSONPaths {
+		p, err := jsonpath.RelaxedJSONPathExpression(p)
+		if err != nil {
+			return nil, err
+		}
+
+		ex, err := jsonextracter.BuildExtracter(p, true)
+		if err != nil {
+			return nil, err
+		}
+		extracters = append(extracters, ex)
+	}
+
+	trimFunc := func(obj interface{}) (ret interface{}, err error) {
+		defer func() {
+			if err != nil {
+				klog.Background().Error(err, "error in triming object")
+				ret, err = obj, nil
+			}
+		}()
+
+		if d, ok := obj.(clientgocache.DeletedFinalStateUnknown); ok {
+			// Since we import ES data into informer cache at startup, the
+			// resource that was deleted during the restart will generate
+			// DeletedFinalStateUnknown.
+			// We unwrap the object here, so there is no need for following
+			// steps including event handler to care about DeletedFinalStateUnknown.
+			obj = d.Obj
+		}
+
+		u, ok := obj.(*unstructured.Unstructured)
+		if !ok {
+			return nil, fmt.Errorf("trim: object's type should be *unstructured.Unstructured, but received %T", obj)
+		}
+
+		merged, err := jsonextracter.Merge(extracters, u.Object)
+		if err != nil {
+			return nil, err
+		}
+
+		unObj := &unstructured.Unstructured{Object: merged}
+		return unObj, nil
+	}
+
+	return trimFunc, nil
+}
+
+// parseTransformer creates and returns a transformation function for the informerSource based on the provided transformers.
+func parseTransformer(t *v1beta1.TransformRuleSpec, clusterName string) (clientgocache.TransformFunc, error) { + if t == nil { + //nolint:nilnil,nolintlint + return nil, nil + } + + fn, found := transform.GetTransformFunc(t.Type) + if !found { + return nil, fmt.Errorf("unsupported transform type %q", t.Type) + } + + tmpl, err := newTemplate(t.ValueTemplate, clusterName) + if err != nil { + return nil, errors.Wrap(err, "invalid transform template") + } + + return func(obj interface{}) (ret interface{}, err error) { + defer func() { + if err != nil { + klog.Background().Error(err, "error in transforming object") + } + }() + + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("transform: object's type should be *unstructured.Unstructured, but received %T", obj) + } + + templateData := struct { + *unstructured.Unstructured + Cluster string + }{ + Unstructured: u, + Cluster: clusterName, + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, templateData); err != nil { + return nil, errors.Wrap(err, "transform: error rendering template") + } + return fn(obj, buf.String()) + }, nil +} + +// newTemplate creates and returns a new text template from the provided string, which can be used for processing templates in the syncer. +func newTemplate(tmpl, cluster string) (*template.Template, error) { + clusterFuncs, _ := transform.GetClusterTmplFuncs(cluster) + return template.New("transformTemplate").Funcs(sprig.FuncMap()).Funcs(clusterFuncs).Parse(tmpl) +} diff --git a/pkg/syncer/parser_test.go b/pkg/syncer/parser_test.go new file mode 100644 index 00000000..7a7f71c3 --- /dev/null +++ b/pkg/syncer/parser_test.go @@ -0,0 +1,28 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syncer + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" +) + +func TestResourceSyncer_parseTransformer(t *testing.T) { + _, err := parseTransformer(&v1beta1.TransformRuleSpec{Type: "patch"}, "clusterName") + require.NoError(t, err) +} diff --git a/pkg/syncer/reconciler.go b/pkg/syncer/reconciler.go index 19b4bbab..2927f075 100644 --- a/pkg/syncer/reconciler.go +++ b/pkg/syncer/reconciler.go @@ -15,25 +15,42 @@ package syncer import ( + "bytes" "context" + "crypto" + "crypto/x509" + "encoding/base64" "fmt" "reflect" + "strconv" "strings" + templateUtil "text/template" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" - searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" "github.com/pkg/errors" + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + utilErr "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/util/keyutil" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + "github.com/KusionStack/karpor/config" + "github.com/KusionStack/karpor/pkg/infra/search/storage" + 
clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/syncer/template" + "github.com/KusionStack/karpor/pkg/syncer/utils" + "github.com/KusionStack/karpor/pkg/util/certgenerator" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -48,18 +65,40 @@ const ( anyResource = "*" ) +type ( + buildClusterConfigFunc func(*clusterv1beta1.Cluster) (*rest.Config, error) +) + // SyncReconciler is the main structure that holds the state and dependencies for the multi-cluster syncer reconciler. type SyncReconciler struct { storage storage.ResourceStorage + highAvailability bool + client client.Client controller controller.Controller mgr MultiClusterSyncManager + + storageAddresses []string + + externalEndpoint string + agentImageTag string + + caCert *x509.Certificate + caKey crypto.Signer } // NewSyncReconciler creates a new instance of the SyncReconciler structure with the given storage. -func NewSyncReconciler(storage storage.ResourceStorage) *SyncReconciler { - return &SyncReconciler{storage: storage} +func NewSyncReconciler(storage storage.ResourceStorage, highAvailability bool, storageAddresses []string, externalEndpoint, agentImageTag string, caCert *x509.Certificate, caKey crypto.Signer) *SyncReconciler { + return &SyncReconciler{ + storage: storage, + highAvailability: highAvailability, + storageAddresses: storageAddresses, + externalEndpoint: externalEndpoint, + caCert: caCert, + caKey: caKey, + agentImageTag: agentImageTag, + } } // SetupWithManager sets up the SyncReconciler with the given manager and registers it as a controller. 
@@ -123,7 +162,7 @@ func (r *SyncReconciler) DeleteEvent(de event.DeleteEvent, queue workqueue.RateL } // Reconcile is the main entry point for the syncer reconciler, which is called whenever there is a change in the watched resources. -func (r *SyncReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { +func (r *SyncReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, retErr error) { logger := ctrl.LoggerFrom(ctx) var cluster clusterv1beta1.Cluster @@ -135,9 +174,24 @@ func (r *SyncReconciler) Reconcile(ctx context.Context, req reconcile.Request) ( return reconcile.Result{}, err } + defer func() { + // patch of delete finalizer for high availability cluster. + if r.isReconcilerEnabled(&cluster) { + err := r.client.Update(ctx, &cluster) + if err != nil && !apierrors.IsNotFound(err) { + retErr = err + } + } + }() + if !cluster.DeletionTimestamp.IsZero() { logger.Info("cluster is being deleted", "cluster", cluster.Name) - return reconcile.Result{}, r.stopCluster(ctx, cluster.Name) + + err := r.reconcileDelete(ctx, &cluster) + if err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil } // TODO: it's danger @@ -146,7 +200,49 @@ func (r *SyncReconciler) Reconcile(ctx context.Context, req reconcile.Request) ( // return reconcile.Result{}, r.stopCluster(ctx, cluster.Name) // } - return reconcile.Result{}, r.handleClusterAddOrUpdate(ctx, cluster.DeepCopy()) + if r.highAvailability { + // set finalizer for cluster. 
+ cluster.SetFinalizers([]string{clusterv1beta1.ClusterFinalizer}) + if cluster.Spec.Mode == clusterv1beta1.PushClusterMode { + return reconcile.Result{}, r.handleClusterAddOrUpdateForPush(ctx, cluster.DeepCopy()) + } + // default pull mode + if r.isReconcilerEnabled(&cluster) { + return reconcile.Result{}, r.handleClusterAddOrUpdateForPull(ctx, cluster.DeepCopy()) + } + return reconcile.Result{}, nil + } + + return reconcile.Result{}, r.handleClusterAddOrUpdate(ctx, cluster.DeepCopy(), buildClusterConfigInSyncer, nil) +} + +// reconcileDelete delete relevant resources for cluster. +func (r *SyncReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster) error { + err := r.stopCluster(ctx, cluster.Name) + if err != nil { + return err + } + + if r.highAvailability && cluster.Spec.Mode == clusterv1beta1.PushClusterMode { + clusterConfig, err := buildClusterConfigInSyncer(cluster) + if err != nil { + return errors.Wrapf(err, "failed to build config for cluster %s", cluster.Name) + } + dynamicClient, err := dynamic.NewForConfig(clusterConfig) + if err != nil { + return errors.Wrapf(err, "failed to build dynamic client for cluster %s", cluster.Name) + } + + err = dynamicClient.Resource(clusterv1beta1.SchemeGroupVersion.WithResource("clusters")).Namespace("").Delete(ctx, cluster.Name, metav1.DeleteOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to delete cluster cr %s in user cluster", cluster.Name) + } + } + + if r.isReconcilerEnabled(cluster) { + cluster.SetFinalizers(nil) + } + return nil } // stopCluster stops the reconciliation process for the given cluster. 
@@ -157,6 +253,21 @@ func (r *SyncReconciler) stopCluster(ctx context.Context, clusterName string) er return err } r.mgr.Stop(ctx, clusterName) + + if r.highAvailability { + // delete secret + secretName := fmt.Sprintf("%s-agent", clusterName) + err := r.client.Delete(ctx, &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: "karpor", + }, + }) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + } + logger.Info("syncing cluster has been stopped", "cluster", clusterName) return nil } @@ -170,15 +281,15 @@ func (r *SyncReconciler) startCluster(ctx context.Context, clusterName string) e } // handleClusterAddOrUpdate is responsible for handling the addition or update of a cluster resource. -func (r *SyncReconciler) handleClusterAddOrUpdate(ctx context.Context, cluster *clusterv1beta1.Cluster) error { +func (r *SyncReconciler) handleClusterAddOrUpdate(ctx context.Context, cluster *clusterv1beta1.Cluster, buildClusterConfigFunc buildClusterConfigFunc, filterForSdkFunc filterForSdkFunc) error { logger := ctrl.LoggerFrom(ctx) - resources, pendingWildcards, err := r.getResources(ctx, cluster) + allResources, validResources, pendingWildcards, err := r.getResources(ctx, cluster, filterForSdkFunc) if err != nil { return errors.Wrapf(err, "error detecting sync resources of the cluster %s", cluster.Name) } - hasResources := len(resources) > 0 + hasResources := len(allResources) > 0 hasWildcards := len(pendingWildcards) > 0 if !hasResources && !hasWildcards { @@ -186,7 +297,7 @@ func (r *SyncReconciler) handleClusterAddOrUpdate(ctx context.Context, cluster * return r.stopCluster(ctx, cluster.Name) } - clusterConfig, err := buildClusterConfig(cluster) + clusterConfig, err := buildClusterConfigFunc(cluster) if err != nil { return errors.Wrapf(err, "failed to build config for cluster %s", cluster.Name) } @@ -214,35 +325,86 @@ func (r *SyncReconciler) handleClusterAddOrUpdate(ctx context.Context, cluster * if hasWildcards { 
klog.Infof("Processing %d pending wildcard resources for cluster %s", len(pendingWildcards), cluster.Name) - wildcardResources, err := r.processWildcardResources(ctx, pendingWildcards, singleMgr, cluster.Name) + wildcardResources, err := r.processWildcardResources(ctx, pendingWildcards, singleMgr, cluster.Name, filterForSdkFunc) if err != nil { return errors.Wrapf(err, "failed to process wildcard resources for cluster %s", cluster.Name) } // Add the discovered resources to our resources list - resources = append(resources, wildcardResources...) - klog.Infof("Added %d resources from wildcards to the sync list for cluster %s", len(wildcardResources), cluster.Name) + allResources = append(allResources, wildcardResources...) + validResources = append(validResources, wildcardResources...) + logger.Info("Added resources from wildcards to the sync list for cluster", "resources", strconv.Itoa(len(wildcardResources)), "cluster", cluster.Name) } - if len(resources) == 0 { + // double check for cluster without resources + if len(allResources) == 0 { logger.Info("after processing wildcards, cluster still has no resources to sync", "cluster", cluster.Name) return r.stopCluster(ctx, cluster.Name) } - if err := singleMgr.UpdateSyncResources(ctx, resources); err != nil { + if err := singleMgr.UpdateSyncResources(ctx, validResources); err != nil { return errors.Wrapf(err, "failed to update sync resources for cluster %s", cluster.Name) } return nil } +// handleClusterAddOrUpdateForPush dispatches the relevant crds resources to user cluster for a push mode cluster resource in high-availability scene. 
+func (r *SyncReconciler) handleClusterAddOrUpdateForPush(ctx context.Context, cluster *clusterv1beta1.Cluster) error { + logger := ctrl.LoggerFrom(ctx) + logger.V(5).Info("handle cluster has been added/updated, push mode", "cluster", cluster.Name) + + // build user client + clusterConfig, err := buildClusterConfigInSyncer(cluster) + if err != nil { + return errors.Wrapf(err, "failed to build config for cluster %s", cluster.Name) + } + dynamicClient, err := dynamic.NewForConfig(clusterConfig) + if err != nil { + return errors.Wrapf(err, "failed to build dynamic client for cluster %s", cluster.Name) + } + + // must apply crds before other resources. + err = utils.ApplyCrds(ctx, dynamicClient) + if err != nil { + return errors.Wrapf(err, "failed to apply crds for cluster %s", cluster.Name) + } + + err = r.dispatchResources(ctx, dynamicClient, cluster) + if err != nil { + return errors.Wrapf(err, "failed to dispatch resources for cluster %s", cluster.Name) + } + + err = r.generateAgentYaml(ctx, cluster) + if err != nil { + return errors.Wrapf(err, "failed to generate agent yaml for push mode cluster %s", cluster.Name) + } + + return err +} + +// handleClusterAddOrUpdate generate the cert for agent in user cluster +func (r *SyncReconciler) handleClusterAddOrUpdateForPull(ctx context.Context, cluster *clusterv1beta1.Cluster) error { + err := r.createClusterRoleAndBinding(ctx, cluster) + if err != nil { + return err + } + + err = r.generateAgentYaml(ctx, cluster) + if err != nil { + return errors.Wrapf(err, "failed to generate agent yaml for pull mmode cluster %s", cluster.Name) + } + return nil +} + // getResources retrieves the list of resource sync rules for the given cluster. 
-func (r *SyncReconciler) getResources(ctx context.Context, cluster *clusterv1beta1.Cluster) ([]*searchv1beta1.ResourceSyncRule, []*searchv1beta1.ResourceSyncRule, error) { +func (r *SyncReconciler) getResources(ctx context.Context, cluster *clusterv1beta1.Cluster, filterForSdkFunc filterForSdkFunc) ([]*searchv1beta1.ResourceSyncRule, []*searchv1beta1.ResourceSyncRule, []*searchv1beta1.ResourceSyncRule, error) { registries, err := r.getRegistries(ctx, cluster) if err != nil { - return nil, nil, err + return nil, nil, nil, err } var allResources []*searchv1beta1.ResourceSyncRule + var validResources []*searchv1beta1.ResourceSyncRule var pendingWildcards []*searchv1beta1.ResourceSyncRule for _, registry := range registries { @@ -252,18 +414,22 @@ func (r *SyncReconciler) getResources(ctx context.Context, cluster *clusterv1bet resources, wildcards, err := r.getNormalizedResources(ctx, ®istry) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - for _, r := range resources { - allResources = append(allResources, r) + for gvr, rule := range resources { + allResources = append(allResources, rule) + if filterForSdkFunc == nil || filterForSdkFunc(ctx, gvr, rule) { + validResources = append(validResources, rule) + } } for _, w := range wildcards { pendingWildcards = append(pendingWildcards, w) } } - return allResources, pendingWildcards, nil + + return allResources, validResources, pendingWildcards, nil } // getNormalizedResources retrieves the normalized resource sync rules from the given sync registry. @@ -316,6 +482,224 @@ func (r *SyncReconciler) getNormalizedResources(ctx context.Context, registry *s return ret, pendingWildcards, nil } +// dispatchResources dispatch cluster, syncregistry, syncresource, transformrule and trimrule resource to user cluster. 
+func (r *SyncReconciler) dispatchResources(ctx context.Context, dynamicClient dynamic.Interface, cluster *clusterv1beta1.Cluster) error { + // collect the resources needed to be dispatched + unstructuredObjectMap := map[schema.GroupVersionResource][]unstructured.Unstructured{} + err := r.getUnstructuredCluster(cluster, unstructuredObjectMap) + if err != nil { + return errors.Wrapf(err, "error get unstructured cluster cr for cluster %s", cluster.Name) + } + + err = r.getUnstructuredRegistries(ctx, cluster, unstructuredObjectMap) + if err != nil { + return errors.Wrapf(err, "error get unstructured objects of the syncregistries for cluster %s", cluster.Name) + } + + // dispatch resources + var errs []error + for gvr := range unstructuredObjectMap { + for idx := range unstructuredObjectMap[gvr] { + unstructuredObj := unstructuredObjectMap[gvr][idx] + err := utils.CreateOrUpdateUnstructured(ctx, dynamicClient, gvr, "", &unstructuredObj) + if err != nil { + errs = append(errs, err) + } + } + } + + return utilErr.NewAggregate(errs) +} + +// createClusterRoleAndBinding create clusterrole and clusterrolebinding for agent in pull mode +func (r *SyncReconciler) createClusterRoleAndBinding(ctx context.Context, cluster *clusterv1beta1.Cluster) error { + clusterRoleName := fmt.Sprintf("%s-clusterrole", cluster.Name) + clusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleName, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{clusterv1beta1.SchemeGroupVersion.Group}, + Resources: []string{"clusters"}, + Verbs: []string{"get"}, + ResourceNames: []string{cluster.Name}, + }, + { + APIGroups: []string{clusterv1beta1.SchemeGroupVersion.Group}, + Resources: []string{"clusters"}, + Verbs: []string{"list", "watch"}, + }, + { + APIGroups: []string{searchv1beta1.SchemeGroupVersion.Group}, + Resources: []string{"syncregistries"}, + Verbs: []string{"get", "watch", "list"}, + }, + { + APIGroups: []string{searchv1beta1.SchemeGroupVersion.Group}, + 
Resources: []string{"syncclusterresources"}, + Verbs: []string{"get", "watch", "list"}, + }, + { + APIGroups: []string{searchv1beta1.SchemeGroupVersion.Group}, + Resources: []string{"transformrules"}, + Verbs: []string{"get", "watch", "list"}, + }, + { + APIGroups: []string{searchv1beta1.SchemeGroupVersion.Group}, + Resources: []string{"trimrules"}, + Verbs: []string{"get", "watch", "list"}, + }, + }, + } + + err := r.client.Create(ctx, clusterRole) + if err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed to create clusterrole %s", clusterRole.Name) + } + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-clusterrolebinding", cluster.Name), + }, + Subjects: []rbacv1.Subject{ + { + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: rbacv1.UserKind, + Name: cluster.Name, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + } + + err = r.client.Create(ctx, clusterRoleBinding) + if err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed to create clusterrolebinding %s", clusterRoleBinding.Name) + } + + return nil +} + +// generateAgentYaml generate the agent yaml to be deployed in user cluster, and save yaml into secret. 
+func (r *SyncReconciler) generateAgentYaml(ctx context.Context, cluster *clusterv1beta1.Cluster) error { + var certData, keyData []byte + if cluster.Spec.Mode == clusterv1beta1.PullClusterMode { + certConfig := certgenerator.Config{ + CAName: "kubernetes", + CommonName: cluster.Name, + Year: 100, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + cert, key, err := certgenerator.NewCaCertAndKeyFromRoot(certConfig, r.caCert, r.caKey) + if err != nil { + return err + } + certData = certgenerator.EncodeCertPEM(cert) + keyData, err = keyutil.MarshalPrivateKeyToPEM(key) + if err != nil { + return fmt.Errorf("unable to marshal private key to PEM %s", err) + } + } + + agentYml, err := r.renderYamlFile(cluster, base64.StdEncoding.EncodeToString(certData), base64.StdEncoding.EncodeToString(keyData)) + if err != nil { + return errors.Wrap(err, "failed to render agent yaml") + } + + err = applyAgentYmlSecret(ctx, r.client, cluster, []byte(agentYml)) + if err != nil { + return err + } + + return nil +} + +// getUnstructuredCluster retrieves the cluster cr for the given cluster. +func (r *SyncReconciler) getUnstructuredCluster(cluster *clusterv1beta1.Cluster, unstructuredObjectMap map[schema.GroupVersionResource][]unstructured.Unstructured) error { + // only dispatch cluster cr once + unstructuredObj, err := utils.ConvertToUnstructured(cluster) + if err != nil { + return errors.Wrapf(err, "failed to convert to unstructured object") + } + unstructuredObjectMap[clusterv1beta1.SchemeGroupVersion.WithResource("clusters")] = []unstructured.Unstructured{*unstructuredObj} + + return nil +} + +// getRegistries retrieves the list of sync registries for the given cluster. 
+func (r *SyncReconciler) getUnstructuredRegistries(ctx context.Context, cluster *clusterv1beta1.Cluster, unstructuredObjectMap map[schema.GroupVersionResource][]unstructured.Unstructured) error { + registries, err := r.getRegistries(ctx, cluster) + if err != nil { + return errors.Wrapf(err, "failed to get registries") + } + + // init map value + unstructuredRegistries := make([]unstructured.Unstructured, 0, len(registries)) + var unstructuredTransformRules []unstructured.Unstructured + var unstructuredTrimRules []unstructured.Unstructured + + // avoid to collect duplicate cr + transformRuleMap := make(map[string]struct{}) + trimRuleMap := make(map[string]struct{}) + + // collect cr list + for idx := range registries { + registry := registries[idx] + // set special cluster name when dispatching + registry.Spec.Clusters = []string{cluster.Name} + + unstructuredObj, err := utils.ConvertToUnstructured(®istry) + if err != nil { + return errors.Wrapf(err, "failed to convert to unstructured object for registry %s", registry.Name) + } + // do not set status when update + unstructuredObj.Object["status"] = nil + unstructuredRegistries = append(unstructuredRegistries, *unstructuredObj) + + // obtain relevant cr + for _, sr := range registry.Spec.SyncResources { + if _, ok := transformRuleMap[sr.TransformRefName]; !ok && sr.TransformRefName != "" { + rule, err := r.extractTransformRule(ctx, &sr) + if err != nil { + return err + } + unstructuredObj, err = utils.ConvertToUnstructured(rule) + if err != nil { + return errors.Wrapf(err, "failed to convert to unstructured object for transformrule %s", rule.Name) + } + + unstructuredTransformRules = append(unstructuredTransformRules, *unstructuredObj) + transformRuleMap[sr.TransformRefName] = struct{}{} + } + + if _, ok := trimRuleMap[sr.TrimRefName]; !ok && sr.TrimRefName != "" { + rule, err := r.extractTrimRule(ctx, &sr) + if err != nil { + return err + } + unstructuredObj, err = utils.ConvertToUnstructured(rule) + if err != nil 
{ + return errors.Wrapf(err, "failed to convert to unstructured object for trimrule %s", rule.Name) + } + + unstructuredTrimRules = append(unstructuredTrimRules, *unstructuredObj) + transformRuleMap[sr.TrimRefName] = struct{}{} + } + } + } + + // set map + unstructuredObjectMap[searchv1beta1.SchemeGroupVersion.WithResource("syncregistries")] = unstructuredRegistries + unstructuredObjectMap[searchv1beta1.SchemeGroupVersion.WithResource("transformrules")] = unstructuredTransformRules + unstructuredObjectMap[searchv1beta1.SchemeGroupVersion.WithResource("trimrules")] = unstructuredTrimRules + + return nil +} + // getRegistries retrieves the list of sync registries for the given cluster. func (r *SyncReconciler) getRegistries(ctx context.Context, cluster *clusterv1beta1.Cluster) ([]searchv1beta1.SyncRegistry, error) { var syncRegistriesList searchv1beta1.SyncRegistryList @@ -379,33 +763,17 @@ func (r *SyncReconciler) getNormalizedResource(ctx context.Context, rsr *searchv normalized := rsr.DeepCopy() if rsr.TransformRefName != "" { - if rsr.Transform != nil { - return nil, fmt.Errorf("specify both Transform and TransformRefName in ResourceSyncRule is not allowed") - } - - var rule searchv1beta1.TransformRule - err := r.client.Get(ctx, types.NamespacedName{Name: rsr.TransformRefName}, &rule) + rule, err := r.extractTransformRule(ctx, rsr) if err != nil { - if apierrors.IsNotFound(err) { - return nil, fmt.Errorf("TransformRule referenced by name %q doesn't exist", rsr.TransformRefName) - } - return nil, errors.Wrapf(err, "failed to list transformRule %s from lister", rsr.TransformRefName) + return nil, err } normalized.Transform = &rule.Spec } if rsr.TrimRefName != "" { - if rsr.Trim != nil { - return nil, fmt.Errorf("specify both Trim and TrimRefName in ResourceSyncRule is not allowed") - } - - var rule searchv1beta1.TrimRule - err := r.client.Get(ctx, types.NamespacedName{Name: rsr.TrimRefName}, &rule) + rule, err := r.extractTrimRule(ctx, rsr) if err != nil { - if 
apierrors.IsNotFound(err) { - return nil, fmt.Errorf("TrimRule referenced by name %q doesn't exist", rsr.TrimRefName) - } - return nil, errors.Wrapf(err, "failed to list trimRule %s from lister", rsr.TrimRefName) + return nil, err } normalized.Trim = &rule.Spec } @@ -413,65 +781,85 @@ func (r *SyncReconciler) getNormalizedResource(ctx context.Context, rsr *searchv return normalized, nil } -// buildClusterConfig creates a rest.Config object for the given cluster. -func buildClusterConfig(cluster *clusterv1beta1.Cluster) (*rest.Config, error) { - access := cluster.Spec.Access - if len(access.Endpoint) == 0 { - return nil, fmt.Errorf("cluster %s's endpoint is empty", cluster.Name) +// extractTransformRule extras transform rules from syncrule +func (r *SyncReconciler) extractTransformRule(ctx context.Context, rsr *searchv1beta1.ResourceSyncRule) (*searchv1beta1.TransformRule, error) { + var rule searchv1beta1.TransformRule + if rsr.Transform != nil { + return nil, fmt.Errorf("specify both Transform and TransformRefName in ResourceSyncRule is not allowed") } - config := rest.Config{ - Host: access.Endpoint, - } - if access.Insecure != nil { - config.TLSClientConfig.Insecure = *access.Insecure + + err := r.client.Get(ctx, types.NamespacedName{Name: rsr.TransformRefName}, &rule) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, fmt.Errorf("TransformRule referenced by name %q doesn't exist", rsr.TransformRefName) + } + return nil, errors.Wrapf(err, "failed to list transformRule %s from lister", rsr.TransformRefName) } - if len(access.CABundle) > 0 { - config.TLSClientConfig.CAData = access.CABundle + return &rule, nil +} + +// extractTransformRule extras trim rules from syncrule +func (r *SyncReconciler) extractTrimRule(ctx context.Context, rsr *searchv1beta1.ResourceSyncRule) (*searchv1beta1.TrimRule, error) { + var rule searchv1beta1.TrimRule + if rsr.Trim != nil { + return nil, fmt.Errorf("specify both Trim and TrimRefName in ResourceSyncRule is not 
allowed") } - if access.Credential != nil { - switch access.Credential.Type { - case clusterv1beta1.CredentialTypeServiceAccountToken: - config.BearerToken = access.Credential.ServiceAccountToken - case clusterv1beta1.CredentialTypeX509Certificate: - if access.Credential.X509 == nil { - return nil, fmt.Errorf("cert and key is required for x509 credential type") - } - config.TLSClientConfig.CertData = access.Credential.X509.Certificate - config.TLSClientConfig.KeyData = access.Credential.X509.PrivateKey - case clusterv1beta1.CredentialTypeOIDC: - if access.Credential.ExecConfig == nil { - return nil, fmt.Errorf("ExecConfig is required for Exec credential type") - } - var env []clientcmdapi.ExecEnvVar - for _, envValue := range access.Credential.ExecConfig.Env { - tempEnv := clientcmdapi.ExecEnvVar{ - Name: envValue.Name, - Value: envValue.Value, - } - env = append(env, tempEnv) - } - config.ExecProvider = &clientcmdapi.ExecConfig{ - Command: access.Credential.ExecConfig.Command, - Args: access.Credential.ExecConfig.Args, - Env: env, - APIVersion: access.Credential.ExecConfig.APIVersion, - InstallHint: access.Credential.ExecConfig.InstallHint, - ProvideClusterInfo: access.Credential.ExecConfig.ProvideClusterInfo, - InteractiveMode: clientcmdapi.ExecInteractiveMode(access.Credential.ExecConfig.InteractiveMode), - } - default: - return nil, fmt.Errorf("unknown credential type %v", access.Credential.Type) + + err := r.client.Get(ctx, types.NamespacedName{Name: rsr.TrimRefName}, &rule) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, fmt.Errorf("TrimRule referenced by name %q doesn't exist", rsr.TrimRefName) } + return nil, errors.Wrapf(err, "failed to list trimRule %s from lister", rsr.TrimRefName) } - return &config, nil + return &rule, nil +} + +// renderYamlFile render agent yaml use known parameters. 
+func (r *SyncReconciler) renderYamlFile(cluster *clusterv1beta1.Cluster, certData, keyData string) (string, error) { + c := template.Config{ + ClusterName: cluster.Name, + Level: cluster.Spec.Level, + StorageAddresses: r.storageAddresses, + ClusterMode: cluster.Spec.Mode, + AgentImageTag: r.agentImageTag, + } + + if cluster.Spec.Mode == clusterv1beta1.PullClusterMode { + c.ExternalEndpoint = r.externalEndpoint + c.CaCert = certData + c.CaKey = keyData + } + + agentYml, err := templateUtil.New("").Parse(string(config.AgentTpl)) + if err != nil { + return "", errors.Wrap(err, "failed to parse agent yaml") + } + + var renderedTemplate bytes.Buffer + err = agentYml.Execute(&renderedTemplate, c) + if err != nil { + return "", errors.Wrap(err, "failed to render agent yaml") + } + + return renderedTemplate.String(), nil +} + +// isReconcilerEnabled verify if execute for cluster object, which can compatible for existed non-ha cluster. +func (r *SyncReconciler) isReconcilerEnabled(cluster *clusterv1beta1.Cluster) bool { + if r.highAvailability && cluster.Spec.Mode != clusterv1beta1.PushClusterMode && r.caCert == nil && r.caKey == nil { + return false + } + return true } // processWildcardResources processes wildcard resources using the singleClusterSyncManager's discoveryClient func (r *SyncReconciler) processWildcardResources( - _ context.Context, + ctx context.Context, wildcards []*searchv1beta1.ResourceSyncRule, singleClusterMgr SingleClusterSyncManager, clusterName string, + filterForSdkFunc filterForSdkFunc, ) ([]*searchv1beta1.ResourceSyncRule, error) { var result []*searchv1beta1.ResourceSyncRule @@ -513,6 +901,14 @@ func (r *SyncReconciler) processWildcardResources( resourceRule := nr.DeepCopy() resourceRule.Resource = apiResource.Name + gvr, err := parseGVR(resourceRule) + if err != nil { + return nil, errors.Wrapf(err, "error in parsing GVR") + } + if filterForSdkFunc != nil && !filterForSdkFunc(ctx, gvr, resourceRule) { + continue + } + result = append(result, 
resourceRule) klog.Infof("Created sync rule for resource %s in GroupVersion %s (GVR: %s) for cluster %s", @@ -522,3 +918,91 @@ func (r *SyncReconciler) processWildcardResources( return result, nil } + +// buildClusterConfigInSyncer creates a rest.Config object for the given cluster in syncer. +func buildClusterConfigInSyncer(cluster *clusterv1beta1.Cluster) (*rest.Config, error) { + access := cluster.Spec.Access + if len(access.Endpoint) == 0 { + return nil, fmt.Errorf("cluster %s's endpoint is empty", cluster.Name) + } + config := rest.Config{ + Host: access.Endpoint, + } + if access.Insecure != nil { + config.TLSClientConfig.Insecure = *access.Insecure + } + if len(access.CABundle) > 0 { + config.TLSClientConfig.CAData = access.CABundle + } + if access.Credential != nil { + switch access.Credential.Type { + case clusterv1beta1.CredentialTypeServiceAccountToken: + config.BearerToken = access.Credential.ServiceAccountToken + case clusterv1beta1.CredentialTypeX509Certificate: + if access.Credential.X509 == nil { + return nil, fmt.Errorf("cert and key is required for x509 credential type") + } + config.TLSClientConfig.CertData = access.Credential.X509.Certificate + config.TLSClientConfig.KeyData = access.Credential.X509.PrivateKey + case clusterv1beta1.CredentialTypeOIDC: + if access.Credential.ExecConfig == nil { + return nil, fmt.Errorf("ExecConfig is required for Exec credential type") + } + var env []clientcmdapi.ExecEnvVar + for _, envValue := range access.Credential.ExecConfig.Env { + tempEnv := clientcmdapi.ExecEnvVar{ + Name: envValue.Name, + Value: envValue.Value, + } + env = append(env, tempEnv) + } + config.ExecProvider = &clientcmdapi.ExecConfig{ + Command: access.Credential.ExecConfig.Command, + Args: access.Credential.ExecConfig.Args, + Env: env, + APIVersion: access.Credential.ExecConfig.APIVersion, + InstallHint: access.Credential.ExecConfig.InstallHint, + ProvideClusterInfo: access.Credential.ExecConfig.ProvideClusterInfo, + InteractiveMode: 
clientcmdapi.ExecInteractiveMode(access.Credential.ExecConfig.InteractiveMode), + } + default: + return nil, fmt.Errorf("unknown credential type %v", access.Credential.Type) + } + } + return &config, nil +} + +// applyAgentYmlSecret apply agent yml +func applyAgentYmlSecret(ctx context.Context, cli client.Client, cluster *clusterv1beta1.Cluster, content []byte) error { + secretName := fmt.Sprintf("%s-agent", cluster.Name) + newAgentSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: "karpor", + }, + Data: map[string][]byte{ + "config": content, + }, + } + + oldAgentSecret := &v1.Secret{} + err := cli.Get(ctx, client.ObjectKey{ + Name: secretName, + Namespace: "karpor", + }, oldAgentSecret) + if err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to create agent secret") + } + err = cli.Create(ctx, newAgentSecret) + if err != nil { + return errors.Wrap(err, "failed to update agent secret") + } + } else if !reflect.DeepEqual(oldAgentSecret.Data, newAgentSecret.Data) { + err = cli.Update(ctx, newAgentSecret) + if err != nil { + return errors.Wrap(err, "failed to update agent secret") + } + } + return nil +} diff --git a/pkg/syncer/reconciler_test.go b/pkg/syncer/reconciler_test.go index 8eff23bb..56f026c0 100644 --- a/pkg/syncer/reconciler_test.go +++ b/pkg/syncer/reconciler_test.go @@ -17,14 +17,11 @@ package syncer import ( "context" + "crypto" + "crypto/x509" "fmt" "testing" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" - searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" - "github.com/KusionStack/karpor/pkg/kubernetes/scheme" "github.com/bytedance/mockey" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -36,9 +33,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" 
"sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/kubernetes/scheme" + "github.com/KusionStack/karpor/pkg/syncer/utils" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + dynamicfake "k8s.io/client-go/dynamic/fake" ) -func Test_buildClusterConfig(t *testing.T) { +func Test_buildClusterConfigInSyncer(t *testing.T) { tests := []struct { name string cluster *clusterv1beta1.Cluster @@ -81,7 +89,7 @@ func Test_buildClusterConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := buildClusterConfig(tt.cluster) + got, err := buildClusterConfigInSyncer(tt.cluster) if tt.wantErr { require.Error(t, err) } else { @@ -463,22 +471,43 @@ func TestSyncReconciler_getRegistries(t *testing.T) { func TestNewSyncReconciler(t *testing.T) { tests := []struct { - name string - storage storage.ResourceStorage + name string + storage storage.ResourceStorage + highAvailability bool + storageAddresses []string + externalEndpoint string + agentImageTag string + caCert *x509.Certificate + caKey crypto.Signer }{ { "test nil", nil, + false, + []string{"127.0.0.1"}, + "127.0.0.1", + "v1.0.0", + nil, + nil, }, { "test not nil", &elasticsearch.Storage{}, + false, + []string{"127.0.0.1"}, + "127.0.0.1", + "v1.0.0", + nil, + nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := NewSyncReconciler(tt.storage) + got := NewSyncReconciler(tt.storage, tt.highAvailability, tt.storageAddresses, + tt.externalEndpoint, tt.agentImageTag, tt.caCert, tt.caKey) require.Equal(t, got.storage, tt.storage) + require.Equal(t, 
got.highAvailability, tt.highAvailability) + require.Equal(t, got.storageAddresses, tt.storageAddresses) }) } } @@ -568,7 +597,146 @@ func TestSyncReconciler_getResources(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &SyncReconciler{client: fake.NewClientBuilder().WithRuntimeObjects(tt.srs...).WithScheme(scheme.Scheme).Build()} - got, _, err := r.getResources(context.TODO(), tt.cluster) + allResources, _, _, err := r.getResources(context.TODO(), tt.cluster, nil) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, allResources) + } + }) + } +} + +func TestSyncReconciler_processWildcardResources(t *testing.T) { + tests := []struct { + name string + wildcards []*searchv1beta1.ResourceSyncRule + apiResources []metav1.APIResource + clusterName string + want []*searchv1beta1.ResourceSyncRule + wantErr bool + }{ + { + name: "successfully process wildcard resources", + wildcards: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "*", + }, + }, + apiResources: []metav1.APIResource{ + { + Name: "foos", + Namespaced: true, + }, + { + Name: "bars", + Namespaced: false, + }, + }, + clusterName: "cluster1", + want: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "foos", + }, + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "bars", + }, + }, + wantErr: false, + }, + { + name: "skip subresources", + wildcards: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "*", + }, + }, + apiResources: []metav1.APIResource{ + { + Name: "foos/status", + Namespaced: true, + }, + { + Name: "foos", + Namespaced: true, + }, + }, + clusterName: "cluster1", + want: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "foos", + }, + }, + wantErr: false, + }, + { + name: "skip cluster-scoped 
resources when namespace specified", + wildcards: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "*", + Namespace: "default", + }, + }, + apiResources: []metav1.APIResource{ + { + Name: "bars", + Namespaced: false, + }, + { + Name: "foos", + Namespaced: true, + }, + }, + clusterName: "cluster1", + want: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "samplecontroller.k8s.io/v1alpha1", + Resource: "foos", + Namespace: "default", + }, + }, + wantErr: false, + }, + { + name: "invalid group version", + wildcards: []*searchv1beta1.ResourceSyncRule{ + { + APIVersion: "invalid", + Resource: "*", + }, + }, + apiResources: []metav1.APIResource{}, + clusterName: "cluster1", + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockManager := &mock.Mock{} + if tt.wantErr { + mockManager.On("GetAPIResources", mock.Anything).Return(nil, fmt.Errorf("failed to get API resources")) + } else { + mockManager.On("GetAPIResources", mock.Anything).Return( + &metav1.APIResourceList{ + GroupVersion: tt.wildcards[0].APIVersion, + APIResources: tt.apiResources, + }, + nil, + ) + } + + r := &SyncReconciler{} + got, err := r.processWildcardResources(context.TODO(), tt.wildcards, &fakeSingleClusterSyncManager{mockManager}, tt.clusterName, nil) + if tt.wantErr { require.Error(t, err) } else { @@ -670,70 +838,76 @@ func TestSyncReconciler_handleClusterAddOrUpdate(t *testing.T) { }, exist: false, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m1 := &mock.Mock{} + m1.On("UpdateSyncResources", mock.Anything, mock.Anything).Return(nil) + m1.On("ClusterConfig").Return(tt.config) + m2 := &mock.Mock{} + m2.On("GetForCluster", mock.Anything).Return(&fakeSingleClusterSyncManager{m1}, tt.exist) + m2.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(&fakeSingleClusterSyncManager{m1}, nil) + m2.On("Start", mock.Anything, mock.Anything).Return(nil) + 
r := &SyncReconciler{ + mgr: &fakeMultiClusterSyncManager{m2}, + client: fake.NewClientBuilder().WithRuntimeObjects(tt.srs...).WithScheme(scheme.Scheme).Build(), + } + err := r.handleClusterAddOrUpdate(context.TODO(), tt.cluster, buildClusterConfigInSyncer, nil) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestSyncReconciler_dispatchResources(t *testing.T) { + tests := []struct { + name string + ctx context.Context + cluster *clusterv1beta1.Cluster + dynamicClient dynamic.Interface + objects []runtime.Object + wantErr bool + }{ { - name: "test wildcard", + name: "test no error", + ctx: context.Background(), cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, Spec: clusterv1beta1.ClusterSpec{ - Access: clusterv1beta1.ClusterAccess{ - Endpoint: "https://localhost:6443", - CABundle: []byte("ca"), - Credential: &clusterv1beta1.ClusterAccessCredential{ - Type: clusterv1beta1.CredentialTypeX509Certificate, - X509: &clusterv1beta1.X509{ - Certificate: []byte("cert"), - PrivateKey: []byte("key"), - }, - }, - }, + Mode: clusterv1beta1.PullClusterMode, + Level: 2, }, }, - srs: []runtime.Object{ + dynamicClient: &dynamicfake.FakeDynamicClient{}, + objects: []runtime.Object{ &searchv1beta1.SyncRegistry{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "1", }, Spec: searchv1beta1.SyncRegistrySpec{ Clusters: []string{"cluster1"}, - SyncResources: []searchv1beta1.ResourceSyncRule{{APIVersion: "samplecontroller.k8s.io/v1alpha1", Resource: "*"}}, + SyncResources: []searchv1beta1.ResourceSyncRule{{APIVersion: "v1", Resource: "pods", TransformRefName: "tfr1", TrimRefName: "tr1"}}, }, }, + &searchv1beta1.TransformRule{ObjectMeta: metav1.ObjectMeta{Name: "tfr1"}, Spec: searchv1beta1.TransformRuleSpec{}}, + &searchv1beta1.TrimRule{ObjectMeta: metav1.ObjectMeta{Name: "tr1"}, Spec: searchv1beta1.TrimRuleSpec{}}, }, - config: &rest.Config{ - Host: "https://localhost:6443", - TLSClientConfig: 
rest.TLSClientConfig{ - CAData: []byte("ca"), - CertData: []byte("cert"), - KeyData: []byte("key"), - }, - }, - exist: true, + wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.name == "test wildcard" { - m := mockey.Mock((*SyncReconciler).processWildcardResources).To( - func(_ context.Context, _ []*searchv1beta1.ResourceSyncRule, _ SingleClusterSyncManager, _ string) ([]*searchv1beta1.ResourceSyncRule, error) { - return []*searchv1beta1.ResourceSyncRule{{APIVersion: "samplecontroller.k8s.io/v1alpha1", Resource: "foos"}}, nil - }).Build() - defer m.UnPatch() - } + mockey.Mock(utils.CreateOrUpdateUnstructured).Return(nil).Build() + mockey.Mock((*SyncReconciler).getUnstructuredRegistries).Return(nil).Build() + defer mockey.UnPatchAll() - m1 := &mock.Mock{} - m1.On("UpdateSyncResources", mock.Anything, mock.Anything).Return(nil) - m1.On("ClusterConfig").Return(tt.config) - m2 := &mock.Mock{} - m2.On("GetForCluster", mock.Anything).Return(&fakeSingleClusterSyncManager{m1}, tt.exist) - m2.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(&fakeSingleClusterSyncManager{m1}, nil) - m2.On("Start", mock.Anything, mock.Anything).Return(nil) - r := &SyncReconciler{ - mgr: &fakeMultiClusterSyncManager{m2}, - client: fake.NewClientBuilder().WithRuntimeObjects(tt.srs...).WithScheme(scheme.Scheme).Build(), - } - err := r.handleClusterAddOrUpdate(context.TODO(), tt.cluster) + r := &SyncReconciler{client: fake.NewClientBuilder().WithRuntimeObjects(tt.objects...).WithScheme(scheme.Scheme).Build()} + err := r.dispatchResources(tt.ctx, tt.dynamicClient, tt.cluster) if tt.wantErr { require.Error(t, err) } else { @@ -743,135 +917,106 @@ func TestSyncReconciler_handleClusterAddOrUpdate(t *testing.T) { } } -func TestSyncReconciler_processWildcardResources(t *testing.T) { +func TestSyncReconciler_getUnstructuredRegistries(t *testing.T) { tests := []struct { - name string - wildcards []*searchv1beta1.ResourceSyncRule - apiResources 
[]metav1.APIResource - clusterName string - want []*searchv1beta1.ResourceSyncRule - wantErr bool + name string + ctx context.Context + cluster *clusterv1beta1.Cluster + unstructuredObjectMap map[schema.GroupVersionResource][]unstructured.Unstructured + objects []runtime.Object + wantErr bool }{ { - name: "successfully process wildcard resources", - wildcards: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "*", - }, - }, - apiResources: []metav1.APIResource{ - { - Name: "foos", - Namespaced: true, + name: "test transform and trim rule", + cluster: &clusterv1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", }, - { - Name: "bars", - Namespaced: false, + Spec: clusterv1beta1.ClusterSpec{ + Mode: clusterv1beta1.PullClusterMode, + Level: 2, }, }, - clusterName: "cluster1", - want: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "foos", - }, - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "bars", + unstructuredObjectMap: map[schema.GroupVersionResource][]unstructured.Unstructured{}, + objects: []runtime.Object{ + &searchv1beta1.SyncRegistry{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "1", + }, + Spec: searchv1beta1.SyncRegistrySpec{ + Clusters: []string{"cluster1"}, + SyncResources: []searchv1beta1.ResourceSyncRule{{APIVersion: "v1", Resource: "pods", TransformRefName: "tfr1", TrimRefName: "tr1"}}, + }, }, + &searchv1beta1.TransformRule{ObjectMeta: metav1.ObjectMeta{Name: "tfr1"}, Spec: searchv1beta1.TransformRuleSpec{}}, + &searchv1beta1.TrimRule{ObjectMeta: metav1.ObjectMeta{Name: "tr1"}, Spec: searchv1beta1.TrimRuleSpec{}}, }, wantErr: false, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &SyncReconciler{client: fake.NewClientBuilder().WithRuntimeObjects(tt.objects...).WithScheme(scheme.Scheme).Build()} + err := r.getUnstructuredRegistries(tt.ctx, tt.cluster, tt.unstructuredObjectMap) 
+ if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestSyncReconciler_renderYamlFile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1beta1.Cluster + certData string + keyData string + want string + wantErr bool + }{ { - name: "skip subresources", - wildcards: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "*", - }, - }, - apiResources: []metav1.APIResource{ - { - Name: "foos/status", - Namespaced: true, - }, - { - Name: "foos", - Namespaced: true, + name: "test pull mode", + cluster: &clusterv1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", }, - }, - clusterName: "cluster1", - want: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "foos", + Spec: clusterv1beta1.ClusterSpec{ + Mode: clusterv1beta1.PullClusterMode, + Level: 2, }, }, - wantErr: false, + certData: "cert", + keyData: "key", + want: renderResForPull, + wantErr: false, }, { - name: "skip cluster-scoped resources when namespace specified", - wildcards: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "*", - Namespace: "default", - }, - }, - apiResources: []metav1.APIResource{ - { - Name: "bars", - Namespaced: false, - }, - { - Name: "foos", - Namespaced: true, - }, - }, - clusterName: "cluster1", - want: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "samplecontroller.k8s.io/v1alpha1", - Resource: "foos", - Namespace: "default", + name: "test pull mode", + cluster: &clusterv1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", }, - }, - wantErr: false, - }, - { - name: "invalid group version", - wildcards: []*searchv1beta1.ResourceSyncRule{ - { - APIVersion: "invalid", - Resource: "*", + Spec: clusterv1beta1.ClusterSpec{ + Mode: clusterv1beta1.PushClusterMode, + Level: 2, }, }, - apiResources: []metav1.APIResource{}, - clusterName: 
"cluster1", - want: nil, - wantErr: true, + certData: "cert", + keyData: "key", + want: renderResForPush, + wantErr: false, }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockManager := &mock.Mock{} - if tt.wantErr { - mockManager.On("GetAPIResources", mock.Anything).Return(nil, fmt.Errorf("failed to get API resources")) - } else { - mockManager.On("GetAPIResources", mock.Anything).Return( - &metav1.APIResourceList{ - GroupVersion: tt.wildcards[0].APIVersion, - APIResources: tt.apiResources, - }, - nil, - ) + r := &SyncReconciler{ + storageAddresses: []string{"https://localhost:6443"}, + agentImageTag: "latest", + externalEndpoint: "https://localhost:6443", } - - r := &SyncReconciler{} - got, err := r.processWildcardResources(context.TODO(), tt.wildcards, &fakeSingleClusterSyncManager{mockManager}, tt.clusterName) - + got, err := r.renderYamlFile(tt.cluster, tt.certData, tt.keyData) if tt.wantErr { require.Error(t, err) } else { @@ -881,3 +1026,8 @@ func TestSyncReconciler_processWildcardResources(t *testing.T) { }) } } + +const ( + renderResForPull = "apiVersion: v1\nkind: Namespace\nmetadata:\n name: karpor\nspec:\n finalizers:\n - kubernetes\n---\napiVersion: v1\ndata:\n config: |-\n apiVersion: v1\n clusters:\n - cluster:\n insecure-skip-tls-verify: true\n server: https://localhost:6443\n name: karpor\n contexts:\n - context:\n cluster: karpor\n user: cluster1\n name: default\n current-context: default\n kind: Config\n users:\n - name: cluster1\n user:\n client-certificate-data: cert\n client-key-data: key\nkind: ConfigMap\nmetadata:\n name: karpor-kubeconfig\n namespace: karpor\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: karpor-agent\n namespace: karpor\nspec:\n replicas: 1\n revisionHistoryLimit: 10\n selector:\n matchLabels:\n app.kubernetes.io/component: karpor-agent\n app.kubernetes.io/instance: karpor\n app.kubernetes.io/name: karpor\n strategy:\n rollingUpdate:\n maxSurge: 25%\n maxUnavailable: 25%\n type: 
RollingUpdate\n template:\n metadata:\n labels:\n app.kubernetes.io/component: karpor-agent\n app.kubernetes.io/instance: karpor\n app.kubernetes.io/name: karpor\n spec:\n containers:\n - args:\n - agent\n - --elastic-search-addresses=https://localhost:6443 \n - --cluster-name=cluster1\n - --cluster-mode=pull\n command:\n - /karpor\n env:\n - name: KUBECONFIG\n value: /etc/karpor/config\n image: kusionstack/karpor:latest\n imagePullPolicy: IfNotPresent\n name: karpor-agent\n ports:\n - containerPort: 7443\n protocol: TCP\n resources:\n limits:\n cpu: 500m\n ephemeral-storage: 10Gi\n memory: 1Gi\n requests:\n cpu: 250m\n ephemeral-storage: 2Gi\n memory: 256Mi\n volumeMounts:\n - mountPath: /etc/karpor/\n name: karpor-kubeconfig\n dnsPolicy: ClusterFirst\n restartPolicy: Always\n terminationGracePeriodSeconds: 30\n volumes:\n - configMap:\n defaultMode: 420\n name: karpor-kubeconfig\n name: karpor-kubeconfig\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: karpor\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n- kind: ServiceAccount\n name: default\n namespace: karpor\n" + renderResForPush = "apiVersion: v1\nkind: Namespace\nmetadata:\n name: karpor\nspec:\n finalizers:\n - kubernetes\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: karpor-agent\n namespace: karpor\nspec:\n replicas: 1\n revisionHistoryLimit: 10\n selector:\n matchLabels:\n app.kubernetes.io/component: karpor-agent\n app.kubernetes.io/instance: karpor\n app.kubernetes.io/name: karpor\n strategy:\n rollingUpdate:\n maxSurge: 25%\n maxUnavailable: 25%\n type: RollingUpdate\n template:\n metadata:\n labels:\n app.kubernetes.io/component: karpor-agent\n app.kubernetes.io/instance: karpor\n app.kubernetes.io/name: karpor\n spec:\n containers:\n - args:\n - agent\n - --elastic-search-addresses=https://localhost:6443 \n - --cluster-name=cluster1\n - --cluster-mode=push\n command:\n - /karpor\n 
image: kusionstack/karpor:latest\n imagePullPolicy: IfNotPresent\n name: karpor-agent\n ports:\n - containerPort: 7443\n protocol: TCP\n resources:\n limits:\n cpu: 500m\n ephemeral-storage: 10Gi\n memory: 1Gi\n requests:\n cpu: 250m\n ephemeral-storage: 2Gi\n memory: 256Mi\n dnsPolicy: ClusterFirst\n restartPolicy: Always\n terminationGracePeriodSeconds: 30\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: karpor\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n- kind: ServiceAccount\n name: default\n namespace: karpor\n" +) diff --git a/pkg/syncer/single_cluster_sync_manager_test.go b/pkg/syncer/single_cluster_sync_manager_test.go index 0f5aaad5..4c00d547 100644 --- a/pkg/syncer/single_cluster_sync_manager_test.go +++ b/pkg/syncer/single_cluster_sync_manager_test.go @@ -24,7 +24,6 @@ import ( "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" - searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" "github.com/bytedance/mockey" "github.com/go-logr/logr" "github.com/stretchr/testify/mock" @@ -39,6 +38,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" ) var _ SingleClusterSyncManager = &fakeSingleClusterSyncManager{} diff --git a/pkg/syncer/source.go b/pkg/syncer/source.go index 07600128..7c533f42 100644 --- a/pkg/syncer/source.go +++ b/pkg/syncer/source.go @@ -19,12 +19,6 @@ import ( "fmt" "time" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" - "github.com/KusionStack/karpor/pkg/syncer/internal" - "github.com/KusionStack/karpor/pkg/syncer/jsonextracter" - "github.com/KusionStack/karpor/pkg/syncer/utils" - "github.com/KusionStack/karpor/pkg/util/jsonpath" 
"github.com/go-logr/logr" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,6 +34,11 @@ import ( ctrlhandler "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/syncer/internal" + "github.com/KusionStack/karpor/pkg/syncer/utils" ) const ( @@ -174,7 +173,7 @@ func (s *informerSource) createInformer(_ context.Context, handler ctrlhandler.E return nil, nil, fmt.Errorf("error parsing selectors: %v", selectors) } - trim, err := s.parseTrimer() + trim, err := parseTrimer(s.ResourceSyncRule.Trim) if err != nil { return nil, nil, errors.Wrap(err, "error parsing trim rule") } @@ -237,57 +236,3 @@ func parseSelectors(rsr v1beta1.ResourceSyncRule) ([]utils.Selector, error) { } return selectors, nil } - -func (s *informerSource) parseTrimer() (clientgocache.TransformFunc, error) { - t := s.ResourceSyncRule.Trim - if t == nil || len(t.Retain.JSONPaths) == 0 { - return nil, nil - } - - extracters := make([]jsonextracter.Extracter, 0, len(t.Retain.JSONPaths)) - for _, p := range t.Retain.JSONPaths { - p, err := jsonpath.RelaxedJSONPathExpression(p) - if err != nil { - return nil, err - } - - ex, err := jsonextracter.BuildExtracter(p, true) - if err != nil { - return nil, err - } - extracters = append(extracters, ex) - } - - trimFunc := func(obj interface{}) (ret interface{}, err error) { - defer func() { - if err != nil { - s.logger.Error(err, "error in triming object") - ret, err = obj, nil - } - }() - - if d, ok := obj.(clientgocache.DeletedFinalStateUnknown); ok { - // Since we import ES data into informer cache at startup, the - // resource that was deleted during the restart will generate - // DeletedFinalStateUnknown. 
- // We unwarp the object here, so there is no need for following - // steps including event handler to care about DeletedFinalStateUnknown. - obj = d.Obj - } - - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("trim: object's type should be *unstructured.Unstructured, but received %T", obj) - } - - merged, err := jsonextracter.Merge(extracters, u.Object) - if err != nil { - return nil, err - } - - unObj := &unstructured.Unstructured{Object: merged} - return unObj, nil - } - - return trimFunc, nil -} diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index 43d638f7..03032bc8 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -15,19 +15,11 @@ package syncer import ( - "bytes" "context" "fmt" "strings" - "text/template" "time" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" - "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" - "github.com/KusionStack/karpor/pkg/syncer/transform" - "github.com/KusionStack/karpor/pkg/syncer/utils" - sprig "github.com/Masterminds/sprig/v3" "github.com/go-logr/logr" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -39,6 +31,11 @@ import ( "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" + "github.com/KusionStack/karpor/pkg/syncer/utils" ) const ( @@ -139,7 +136,7 @@ func (s *ResourceSyncer) Run(ctx context.Context) error { // Wait for the caches to be synced before starting workers s.logger.Info("Waiting for informer caches to sync") - if transformFunc, err := s.parseTransformer(); err != nil { + if transformFunc, err := parseTransformer(s.source.SyncRule().Transform, s.source.Cluster()); err != 
nil { s.logger.Error(err, "error in parsing transform rule") } else { s.transformFunc = transformFunc @@ -279,50 +276,6 @@ func (s *ResourceSyncer) sync(ctx context.Context, key string) error { return nil } -// parseTransformer creates and returns a transformation function for the informerSource based on the ResourceSyncRule's transformers. -func (s *ResourceSyncer) parseTransformer() (clientgocache.TransformFunc, error) { - t := s.source.SyncRule().Transform - if t == nil { - return nil, nil - } - - fn, found := transform.GetTransformFunc(t.Type) - if !found { - return nil, fmt.Errorf("unsupported transform type %q", t.Type) - } - - tmpl, err := newTemplate(t.ValueTemplate, s.source.Cluster()) - if err != nil { - return nil, errors.Wrap(err, "invalid transform template") - } - - return func(obj interface{}) (ret interface{}, err error) { - defer func() { - if err != nil { - s.logger.Error(err, "error in transforming object") - } - }() - - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("transform: object's type should be *unstructured.Unstructured, but received %T", obj) - } - - templateData := struct { - *unstructured.Unstructured - Cluster string - }{ - Unstructured: u, - Cluster: s.source.Cluster(), - } - var buf bytes.Buffer - if err := tmpl.Execute(&buf, templateData); err != nil { - return nil, errors.Wrap(err, "transform: error rendering template") - } - return fn(obj, buf.String()) - }, nil -} - // genUnObj creates a new unstructured.Unstructured object based on the ResourceSyncRule and key. func genUnObj(sr v1beta1.ResourceSyncRule, key string) *unstructured.Unstructured { obj := &unstructured.Unstructured{} @@ -337,9 +290,3 @@ func genUnObj(sr v1beta1.ResourceSyncRule, key string) *unstructured.Unstructure } return obj } - -// newTemplate creates and returns a new text template from the provided string, which can be used for processing templates in the syncer. 
-func newTemplate(tmpl, cluster string) (*template.Template, error) { - clusterFuncs, _ := transform.GetClusterTmplFuncs(cluster) - return template.New("transformTemplate").Funcs(sprig.FuncMap()).Funcs(clusterFuncs).Parse(tmpl) -} diff --git a/pkg/syncer/syncer_test.go b/pkg/syncer/syncer_test.go index 5d76e9ea..2b1cdb10 100644 --- a/pkg/syncer/syncer_test.go +++ b/pkg/syncer/syncer_test.go @@ -20,14 +20,15 @@ import ( "testing" "time" - "github.com/KusionStack/karpor/pkg/infra/search/storage" - "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" - "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" "github.com/bytedance/mockey" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/util/workqueue" + + "github.com/KusionStack/karpor/pkg/infra/search/storage" + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" ) func Test_genUnObj(t *testing.T) { @@ -289,15 +290,3 @@ func (q *fakeQueue) Add(item interface{}) { } q.queue = append(q.queue, item) } - -func TestResourceSyncer_parseTransformer(t *testing.T) { - s := &ResourceSyncer{ - source: &informerSource{ - ResourceSyncRule: v1beta1.ResourceSyncRule{Transform: &v1beta1.TransformRuleSpec{Type: "patch"}}, - cluster: "clusterName", - }, - } - - _, err := s.parseTransformer() - require.NoError(t, err) -} diff --git a/pkg/syncer/template/metadata.go b/pkg/syncer/template/metadata.go new file mode 100644 index 00000000..e145f0a8 --- /dev/null +++ b/pkg/syncer/template/metadata.go @@ -0,0 +1,27 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package template + +// Config is the struct that defines the metadata of the agent template. +type Config struct { + ExternalEndpoint string + CaCert string + CaKey string + StorageAddresses []string + ClusterName string + ClusterMode string + Level int + AgentImageTag string +} diff --git a/pkg/syncer/utils/dynamic_sync_rule.go b/pkg/syncer/utils/dynamic_sync_rule.go new file mode 100644 index 00000000..76de1b99 --- /dev/null +++ b/pkg/syncer/utils/dynamic_sync_rule.go @@ -0,0 +1,40 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" +) + +var ( + syncGVK sync.Map + ZeroVal = searchv1beta1.ResourceSyncRule{} +) + +func SetSyncGVK(gvk schema.GroupVersionKind, rule searchv1beta1.ResourceSyncRule) { + syncGVK.Store(gvk.String(), rule) +} + +func GetSyncGVK(gvk schema.GroupVersionKind) (searchv1beta1.ResourceSyncRule, bool) { + rule, exist := syncGVK.Load(gvk.String()) + if !exist { + return ZeroVal, false + } + return rule.(searchv1beta1.ResourceSyncRule), exist +} diff --git a/pkg/syncer/utils/dynamic_sync_rule_test.go b/pkg/syncer/utils/dynamic_sync_rule_test.go new file mode 100644 index 00000000..a218c37d --- /dev/null +++ b/pkg/syncer/utils/dynamic_sync_rule_test.go @@ -0,0 +1,49 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime/schema" + + searchv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/v1beta1" +) + +func TestGetSyncGVK(t *testing.T) { + // Test cases + testCases := []struct { + name string + gvk schema.GroupVersionKind + resourceSyncRule searchv1beta1.ResourceSyncRule + exist bool + }{ + { + name: "Success - ListKeys", + gvk: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, + resourceSyncRule: ZeroVal, + exist: false, + }, + } + // Execute test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + resourceSyncRule, exist := GetSyncGVK(tc.gvk) + require.Equal(t, tc.exist, exist) + require.Equal(t, tc.resourceSyncRule, resourceSyncRule) + }) + } +} diff --git a/pkg/syncer/utils/kubernetes.go b/pkg/syncer/utils/kubernetes.go new file mode 100644 index 00000000..3fd7863a --- /dev/null +++ b/pkg/syncer/utils/kubernetes.go @@ -0,0 +1,95 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "sigs.k8s.io/yaml" + + "github.com/KusionStack/karpor/config" +) + +// ConvertToUnstructured converts the structured object to unstructured +func ConvertToUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, fmt.Errorf("failed to convert to Unstructured: %v", err) + } + + return &unstructured.Unstructured{ + Object: unstructuredObj, + }, nil +} + +// ApplyCrds applies crds to user cluster before other resources. +func ApplyCrds(ctx context.Context, dynamicClient dynamic.Interface) error { + for _, crd := range config.CrdList { + var objMap map[string]interface{} + err := yaml.Unmarshal(crd, &objMap) + if err != nil { + return err + } + + unstructuredObj := &unstructured.Unstructured{ + Object: objMap, + } + err = CreateOrUpdateUnstructured(ctx, dynamicClient, apiextensionsv1.SchemeGroupVersion.WithResource("customresourcedefinitions"), "", unstructuredObj) + if err != nil { + return err + } + } + + return nil +} + +// CreateOrUpdateUnstructured creates or updates object using dynamic client. 
+func CreateOrUpdateUnstructured(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string, newObject *unstructured.Unstructured) error { + resourceClient := dynamicClient.Resource(gvr).Namespace(namespace) + + existingObj, getErr := resourceClient.Get(ctx, newObject.GetName(), metav1.GetOptions{}) + if getErr != nil { + if apierrors.IsNotFound(getErr) { + // set initial resource version + newObject.SetResourceVersion("0") + _, createErr := resourceClient.Create(ctx, newObject, metav1.CreateOptions{}) + if createErr != nil { + return errors.Wrapf(createErr, "failed to create resource") + } + } else { + return errors.Wrapf(getErr, "failed to get resource: %v", getErr) + } + } else { + // set uid and resource version for existed object + newObject.SetResourceVersion(existingObj.GetResourceVersion()) + newObject.SetUID(existingObj.GetUID()) + + _, updateErr := resourceClient.Update(ctx, newObject, metav1.UpdateOptions{}) + if updateErr != nil { + return errors.Wrapf(updateErr, "failed to update resource: %v", newObject.GetName()) + } + } + + return nil +} diff --git a/pkg/util/certgenerator/test_helper.go b/pkg/util/certgenerator/test_helper.go new file mode 100644 index 00000000..c5403817 --- /dev/null +++ b/pkg/util/certgenerator/test_helper.go @@ -0,0 +1,52 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package certgenerator + +import ( + "context" + + coreV1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +type FakeCoreV1 struct { + v1.CoreV1Interface +} + +func (FakeCoreV1) Secrets(namespace string) v1.SecretInterface { + return &FakeSecret{} +} + +func (FakeCoreV1) ConfigMaps(namespace string) v1.ConfigMapInterface { + return &FakeConfigMap{} +} + +type FakeSecret struct { + v1.SecretInterface +} + +func (f *FakeSecret) Apply(ctx context.Context, secret *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *coreV1.Secret, err error) { + return &coreV1.Secret{}, nil +} + +type FakeConfigMap struct { + v1.ConfigMapInterface +} + +func (f *FakeConfigMap) Apply(ctx context.Context, secret *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *coreV1.ConfigMap, err error) { + return &coreV1.ConfigMap{}, nil +} diff --git a/pkg/util/certgenerator/util.go b/pkg/util/certgenerator/util.go index da31e252..99044ae9 100644 --- a/pkg/util/certgenerator/util.go +++ b/pkg/util/certgenerator/util.go @@ -20,6 +20,7 @@ import ( "crypto/elliptic" "crypto/rand" "crypto/rsa" + "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" @@ -173,3 +174,21 @@ func EncodeCertPEM(cert *x509.Certificate) []byte { } return pem.EncodeToMemory(&block) } + +// LoadCertificate loads a certificate and its corresponding private key from files. +func LoadCertificate(certFile, keyFile string) (*x509.Certificate, crypto.Signer, error) { + // Load the certificate and key pair using tls.LoadX509KeyPair. + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, nil, fmt.Errorf("failed to load key pair: %w", err) + } + + // Parse the certificate. 
+ certData, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse certificate: %w", err) + } + + // Return the parsed certificate and the private key as a crypto.Signer. + return certData, cert.PrivateKey.(crypto.Signer), nil +} diff --git a/pkg/util/clusterinstall/cluster_install.go b/pkg/util/clusterinstall/cluster_install.go index 16971e75..7b144337 100644 --- a/pkg/util/clusterinstall/cluster_install.go +++ b/pkg/util/clusterinstall/cluster_install.go @@ -17,12 +17,13 @@ package clusterinstall import ( "fmt" - clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" + + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" ) -func ConvertKubeconfigToCluster(name, displayName, description string, cfg *rest.Config) (*clusterv1beta1.Cluster, error) { +func ConvertKubeconfigToCluster(name, displayName, description, clusterMode string, clusterLevel int, cfg *rest.Config) (*clusterv1beta1.Cluster, error) { cluster := clusterv1beta1.Cluster{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterv1beta1.SchemeGroupVersion.String(), @@ -36,6 +37,12 @@ func ConvertKubeconfigToCluster(name, displayName, description string, cfg *rest } else { cluster.Spec.DisplayName = name } + if clusterMode != "" { + cluster.Spec.Mode = clusterMode + } + if clusterLevel > 0 && clusterLevel <= 3 { + cluster.Spec.Level = clusterLevel + } access := clusterv1beta1.ClusterAccess{} if !cfg.Insecure { access.CABundle = cfg.CAData diff --git a/pkg/util/clusterinstall/cluster_install_test.go b/pkg/util/clusterinstall/cluster_install_test.go index 21f0dd8a..85894a01 100644 --- a/pkg/util/clusterinstall/cluster_install_test.go +++ b/pkg/util/clusterinstall/cluster_install_test.go @@ -19,19 +19,22 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - clusterv1beta1 
"github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" "github.com/stretchr/testify/require" "k8s.io/client-go/rest" + + clusterv1beta1 "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/v1beta1" ) func TestConvertKubeconfigToCluster(t *testing.T) { tests := []struct { - name string - displayName string - description string - cfg *rest.Config - wantCluster *clusterv1beta1.Cluster - wantErr bool + name string + displayName string + description string + clusterMode string + clusterLevel int + cfg *rest.Config + wantCluster *clusterv1beta1.Cluster + wantErr bool }{ // Test case with secure setup (non-insecure, with certificate data) { @@ -122,7 +125,7 @@ func TestConvertKubeconfigToCluster(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { // Call the function under test - cluster, err := ConvertKubeconfigToCluster(tc.name, tc.displayName, tc.description, tc.cfg) + cluster, err := ConvertKubeconfigToCluster(tc.name, tc.displayName, tc.description, tc.clusterMode, tc.clusterLevel, tc.cfg) // Assert that an error occurred when expected if tc.wantErr { require.Error(t, err) diff --git a/pkg/util/ctxutil/ctxutil.go b/pkg/util/ctxutil/ctxutil.go index 53117e32..6fa0d73c 100644 --- a/pkg/util/ctxutil/ctxutil.go +++ b/pkg/util/ctxutil/ctxutil.go @@ -17,9 +17,10 @@ package ctxutil import ( "context" - "github.com/KusionStack/karpor/pkg/core/middleware" "github.com/go-logr/logr" "k8s.io/klog/v2" + + "github.com/KusionStack/karpor/pkg/core/middleware" ) // GetLogger returns the logger from the given context. 
diff --git a/pkg/util/ctxutil/ctxutil_test.go b/pkg/util/ctxutil/ctxutil_test.go index a7264550..e0388f02 100644 --- a/pkg/util/ctxutil/ctxutil_test.go +++ b/pkg/util/ctxutil/ctxutil_test.go @@ -18,10 +18,11 @@ import ( "context" "testing" - "github.com/KusionStack/karpor/pkg/core/middleware" "github.com/go-logr/logr" "github.com/stretchr/testify/require" "k8s.io/klog/v2" + + "github.com/KusionStack/karpor/pkg/core/middleware" ) func TestGetLogger(t *testing.T) { diff --git a/pkg/util/safeutil/safeutil.go b/pkg/util/safeutil/safeutil.go index c80843b4..54a39966 100644 --- a/pkg/util/safeutil/safeutil.go +++ b/pkg/util/safeutil/safeutil.go @@ -19,10 +19,11 @@ import ( "fmt" "runtime/debug" - "github.com/KusionStack/karpor/pkg/util/ctxutil" "github.com/elliotxx/safe" "github.com/go-logr/logr" "k8s.io/klog/v2" + + "github.com/KusionStack/karpor/pkg/util/ctxutil" ) // LoggerRecoverHandler returns a recover handler by the given logger. diff --git a/pkg/version/VERSION b/pkg/version/VERSION index b417ed46..b82608c0 100644 --- a/pkg/version/VERSION +++ b/pkg/version/VERSION @@ -1 +1 @@ -default-version +v0.1.0 diff --git a/sdk/agent.go b/sdk/agent.go new file mode 100644 index 00000000..2a9bfc22 --- /dev/null +++ b/sdk/agent.go @@ -0,0 +1,105 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sdk + +import ( + "context" + + esclient "github.com/elastic/go-elasticsearch/v8" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2/klogr" + coreinstall "k8s.io/kubernetes/pkg/apis/core/install" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/KusionStack/karpor/pkg/infra/search/storage/elasticsearch" + clusterinstall "github.com/KusionStack/karpor/pkg/kubernetes/apis/cluster/install" + searchinstall "github.com/KusionStack/karpor/pkg/kubernetes/apis/search/install" + "github.com/KusionStack/karpor/pkg/syncer" + "github.com/KusionStack/karpor/pkg/syncer/utils" +) + +type AgentOptions struct { + ElasticSearchAddresses []string + ClusterName string + + SyncGVKs []schema.GroupVersionKind +} + +/* +StartAgentWithOperator defines sdk entrance. +Developers can use this function to share memory with exited operator for special resources in SyncGVK. +Only support push mode. 
+*/ +func StartAgentWithOperator(ctx context.Context, existMgr manager.Manager, options *AgentOptions) error { + ctrl.SetLogger(klogr.New()) + log := ctrl.Log.WithName("setup") + + defer func() { + if err := recover(); err != nil { + log.Error(errors.New("recovered panic in karpor agent"), "") + } + }() + + // apply crds + dynamicClient, err := dynamic.NewForConfig(ctrl.GetConfigOrDie()) + if err != nil { + return errors.Wrapf(err, "failed to build dynamic client for ageng") + } + err = utils.ApplyCrds(ctx, dynamicClient) + if err != nil { + return err + } + + // TODO: add startup parameters to change the type of storage + //nolint:contextcheck + es, err := elasticsearch.NewStorage(esclient.Config{ + Addresses: options.ElasticSearchAddresses, + }) + if err != nil { + log.Error(err, "unable to init elasticsearch client") + return err + } + + // start operator for special resource + if len(options.SyncGVKs) != 0 { + for idx := range options.SyncGVKs { + gvk := options.SyncGVKs[idx] + utils.SetSyncGVK(gvk, utils.ZeroVal) + err := syncer.NewDynamicReconciler(ctx, options.ClusterName, gvk, es).SetupWithManager(existMgr) + if err != nil { + return err + } + } + } + + //nolint:contextcheck + if err = syncer.NewAgentReconciler(es, options.ClusterName).SetupWithManager(existMgr); err != nil { + log.Error(err, "unable to create resource syncer") + return err + } + + return nil +} + +// AddToScheme install necessary api to existed scheme +func AddToScheme(scheme *runtime.Scheme) { + clusterinstall.Install(scheme) + searchinstall.Install(scheme) + coreinstall.Install(scheme) +} diff --git a/sdk/example/example.go b/sdk/example/example.go new file mode 100644 index 00000000..595ad86e --- /dev/null +++ b/sdk/example/example.go @@ -0,0 +1,92 @@ +// Copyright The Karpor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "fmt" + "os" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/KusionStack/karpor/sdk" +) + +type PodReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + pod := &corev1.Pod{} + if err := r.Get(ctx, req.NamespacedName, pod); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log := ctrl.Log.WithName("setup") + + log.Info("Reconciling Pod: %s/%s", pod.Namespace, pod.Name) + + return ctrl.Result{}, nil +} + +func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Pod{}). + Complete(r) +} + +// example for sdk, only support push mode. 
+func main() { + scheme := runtime.NewScheme() + sdk.AddToScheme(scheme) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to start manager: %v\n", err) + os.Exit(1) + } + + if err = (&PodReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + fmt.Fprintf(os.Stderr, "unable to create controller: %v\n", err) + os.Exit(1) + } + + err = sdk.StartAgentWithOperator(context.Background(), mgr, &sdk.AgentOptions{ + ClusterName: "example-cluster", + ElasticSearchAddresses: []string{"http://127.0.0.1:9200"}, + SyncGVKs: []schema.GroupVersionKind{ + {Group: "", Version: "v1", Kind: "Pod"}, + }, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to add dynamic controller: %v\n", err) + os.Exit(1) + } + + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + fmt.Fprintf(os.Stderr, "problem running manager: %v\n", err) + os.Exit(1) + } +} diff --git a/ui/package.json b/ui/package.json index ded7c44c..43b68d50 100644 --- a/ui/package.json +++ b/ui/package.json @@ -62,7 +62,9 @@ "release": "standard-version", "dev": "GENERATE_SOURCEMAP=false PORT=8000 craco start", "start": "GENERATE_SOURCEMAP=false PORT=8080 HTTPS=true craco start", + "start:inner": "GENERATE_SOURCEMAP=false PORT=8080 HTTPS=true REACT_APP_DEPLOY_MODE=HIGH_AVAILABILITY craco start", "build": "GENERATE_SOURCEMAP=false PUBLIC_URL=/public/ craco build", + "build:inner": "GENERATE_SOURCEMAP=false PUBLIC_URL=/public/ REACT_APP_DEPLOY_MODE=HIGH_AVAILABILITY craco build", "lint:prettier": "prettier --write src\"/**/*.+(js|ts|tsx|jsx|json|md|json)\"", "lint:style": "stylelint src\"/**/*.+(css|scss|less|stylus|sass|postcss)\" --fix", "lint:fix": "eslint --fix --ext .js,.jsx,.ts,.tsx src", diff --git a/ui/src/components/agentYaml/index.tsx b/ui/src/components/agentYaml/index.tsx new file mode 100644 index 00000000..3cb9dc40 --- /dev/null +++ 
b/ui/src/components/agentYaml/index.tsx @@ -0,0 +1,428 @@ +import React, { useEffect, useRef, useState, useCallback } from 'react' +import type { LegacyRef } from 'react' +import { Alert, Button, message, Space, Spin, Tooltip } from 'antd' +import { Resizable } from 're-resizable' +import { useTranslation } from 'react-i18next' +import { + CopyOutlined, + CloseOutlined, + PoweroffOutlined, + FullscreenExitOutlined, + FullscreenOutlined, +} from '@ant-design/icons' +import hljs from 'highlight.js' +import 'highlight.js/styles/lightfair.css' +import { useSelector } from 'react-redux' +import Markdown from 'react-markdown' +import axios from 'axios' +import { FullScreen, useFullScreenHandle } from 'react-full-screen' +import i18n from '@/i18n' +import aiSummarySvg from '@/assets/ai-summary.svg' + +import styles from './styles.module.less' + +// eslint-disable-next-line @typescript-eslint/no-var-requires +hljs.registerLanguage('yaml', require('highlight.js/lib/languages/yaml')) + +type InterpretStatus = + | 'idle' + | 'init' + | 'streaming' + | 'complete' + | 'error' + | 'loading' + +type IProps = { + data: any + height?: string | number +} + +const AgentYaml = (props: IProps) => { + const { t } = useTranslation() + const handle = useFullScreenHandle() + const yamlRef = useRef | any>() + const diagnosisContentRef = useRef(null) + const interpretEndRef = useRef(null) + const contentRef = useRef(null) + const observerRef = useRef(null) + const { data } = props + const [moduleHeight, setModuleHeight] = useState(500) + const [interpretStatus, setInterpretStatus] = + useState('idle') + const [interpret, setInterpret] = useState('') + const [isStreaming, setStreaming] = useState(false) + const abortControllerRef = useRef(null) + const { aiOptions } = useSelector((state: any) => state.globalSlice) + const isAIEnabled = aiOptions?.AIModel && aiOptions?.AIAuthToken + + useEffect(() => { + if (yamlRef.current && data) { + yamlRef.current.textContent = data + 
hljs.highlightElement(yamlRef.current) + } + }, [data]) + + // Function to scroll to the bottom of the container + const scrollToBottom = useCallback(() => { + if (diagnosisContentRef.current && interpretStatus === 'streaming') { + const container = diagnosisContentRef.current + const scrollHeight = container.scrollHeight + const height = container.clientHeight + const maxScroll = scrollHeight - height + container.scrollTo({ + top: maxScroll, + behavior: 'auto', + }) + } + }, [interpretStatus]) + + // Watch for content changes + useEffect(() => { + if (interpretStatus === 'streaming' && diagnosisContentRef.current) { + if (observerRef.current) { + observerRef.current.disconnect() + } + + const observer = new MutationObserver(() => { + scrollToBottom() + }) + + observer.observe(diagnosisContentRef.current, { + childList: true, + subtree: true, + characterData: true, + }) + + observerRef.current = observer + + return () => { + observer.disconnect() + } + } + }, [interpretStatus, scrollToBottom]) + + // Scroll when content updates + useEffect(() => { + if (interpretStatus === 'streaming') { + scrollToBottom() + } + }, [interpret, scrollToBottom, interpretStatus]) + + function copy() { + const textarea = document.createElement('textarea') + textarea.value = data + document.body.appendChild(textarea) + textarea.select() + document.execCommand('copy') + message.success(t('CopySuccess')) + document.body.removeChild(textarea) + } + + const handleInterpret = async () => { + try { + if (!data) { + message.warning(t('YAML.NoContent')) + return + } + + // Reset interpret state + setInterpret('') + setInterpretStatus('loading' as InterpretStatus) + + // Cancel any existing SSE connection + if (abortControllerRef.current) { + abortControllerRef.current.abort() + } + + // Create new AbortController for this request + const abortController = new AbortController() + abortControllerRef.current = abortController + + setStreaming(true) + + // Create new fetch request for interpret + 
const url = `${axios.defaults.baseURL}/rest-api/v1/insight/yaml/interpret/stream`
+
+      // Send POST request and handle SSE response.
+      // NOTE(review): the URL reuses axios' baseURL but the request goes
+      // through raw fetch, so axios interceptors (auth headers, etc.) are
+      // bypassed — confirm the endpoint needs none of them.
+      fetch(url, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          Accept: 'text/event-stream',
+        },
+        body: JSON.stringify({
+          yaml: data,
+          language: i18n.language,
+        }),
+        signal: abortController.signal,
+      })
+        .then(response => {
+          if (!response.ok) {
+            throw new Error(response.statusText)
+          }
+
+          // Create a reader from the response body stream
+          const reader = response.body?.getReader()
+          const decoder = new TextDecoder()
+
+          if (!reader) {
+            throw new Error('No response body')
+          }
+
+          // Read the stream
+          const processStream = async () => {
+            try {
+              let streaming = true
+              while (streaming) {
+                const { done, value } = await reader.read()
+
+                if (done) {
+                  streaming = false
+                  setInterpretStatus('complete' as InterpretStatus)
+                  break
+                }
+
+                // Decode the chunk and process events.
+                // NOTE(review): events are parsed per network chunk — an SSE
+                // event split across two reads will fail JSON.parse below and
+                // be silently dropped. Also, String.replace removes only the
+                // FIRST 'data: ' occurrence, and decode() without
+                // { stream: true } can split multibyte characters. Consider
+                // buffering partial events across reads.
+                const chunk = decoder.decode(value)
+                const events = chunk
+                  .split('\n\n')
+                  .filter(Boolean)
+                  .map(event => event.replace('data: ', ''))
+
+                for (const event of events) {
+                  try {
+                    const interpretEvent = JSON.parse(event)
+
+                    // Server event protocol: start / chunk / error / complete.
+                    switch (interpretEvent.type) {
+                      case 'start':
+                        setInterpretStatus('streaming' as InterpretStatus)
+                        break
+                      case 'chunk':
+                        // Append the streamed fragment and keep the tail in view.
+                        setInterpret(prev => prev + interpretEvent.content)
+                        if (interpretEndRef.current) {
+                          interpretEndRef.current.scrollIntoView({
+                            behavior: 'smooth',
+                          })
+                        }
+                        break
+                      case 'error':
+                        streaming = false
+                        setInterpretStatus('error' as InterpretStatus)
+                        message.error(interpretEvent.content)
+                        reader.cancel()
+                        break
+                      case 'complete':
+                        streaming = false
+                        setInterpretStatus('complete' as InterpretStatus)
+                        reader.cancel()
+                        break
+                    }
+                  } catch (error) {
+                    // Malformed event payloads are logged and skipped.
+                    console.error('Failed to parse interpret event:', error)
+                  }
+                }
+              }
+            } catch (error) {
+              // Aborts are expected (user cancelled / restarted); anything
+              // else is a genuine transport failure.
+              if (error.name === 'AbortError') {
+                console.log('Interpret stream aborted')
+              } else {
+                console.error('Error reading stream:', error)
setInterpretStatus('error' as InterpretStatus)
+                message.error(t('YAML.InterpretConnectionError'))
+              }
+            }
+          }
+
+          processStream()
+        })
+        .catch(error => {
+          if (error.name !== 'AbortError') {
+            console.error('Failed to start interpret:', error)
+            setInterpretStatus('error' as InterpretStatus)
+            message.error(t('YAML.FailedToStartInterpret'))
+          }
+        })
+    } catch (error) {
+      console.error('Failed to start interpret:', error)
+      setInterpretStatus('error' as InterpretStatus)
+      message.error(t('YAML.FailedToInterpret'))
+    } finally {
+      // NOTE(review): the fetch promise chain above is NOT awaited, so this
+      // 'finally' runs as soon as the request is dispatched — isStreaming is
+      // reset to false while the SSE stream is still in flight. If the flag
+      // is meant to track the whole stream, move this into processStream's
+      // completion paths.
+      setStreaming(false)
+    }
+  }
+  // Per-render geometry snapshots; compared in the (extraction-mangled) JSX
+  // below to decide whether the streaming indicator should stick to the
+  // panel bottom. Presumably the original read
+  // `dotToTopHeight - contentToTopHeight >= 0` — confirm against the commit.
+  const contentToTopHeight = contentRef.current?.getBoundingClientRect()?.top
+  const dotToTopHeight = interpretEndRef.current?.getBoundingClientRect()?.top
+
+  return (
+
+// NOTE(review): the entire render tree below was destroyed during patch
+// extraction — every JSX tag (Resizable, FullScreen, Tooltip, Button, Spin,
+// Alert, Markdown, img, div, …) was stripped, leaving only orphaned attribute
+// objects, text children and expression fragments. Do NOT attempt to apply
+// this hunk as-is; restore the markup from the original commit. The fragments
+// are preserved verbatim below for reference.
+        {
+          const newModuleHeight = moduleHeight + d.height
+          setModuleHeight(newModuleHeight)
+        }}
+        handleStyles={{
+          bottom: {
+            bottom: 0,
+            height: '6px',
+            cursor: 'row-resize',
+            background: 'transparent',
+            transition: 'background 0.3s ease',
+          },
+        }}
+        handleClasses={{
+          bottom: styles.resizeHandle,
+        }}
+      >
+
+
+
+
+
+          {data && (
+            <>
+              {!handle.active && (
+
+              )}
+              {handle.active && (
+
+
+              )}
+
+
+
+
+
+          {interpretStatus !== 'idle' && (
+
+
+
+
+                    ai summary
+
+                  {t('YAML.InterpretResult')}
+
+
+                {interpretStatus === 'streaming' && (
+
+
+
+
+                {interpretStatus === 'loading' ||
+                (interpretStatus === 'streaming' && !interpret) ? (
+
+
+

{t('EventAggregator.DiagnosisInProgress')}

+
+
+                ) : interpretStatus === 'streaming' ? (
+                  <>
+                    {interpret}
+
+
+                ) : interpretStatus === 'error' ? (
+
+                ) : (
+                  {interpret}
+                )}
+
+                {interpretStatus === 'streaming' && interpret && (
+
= 0 ? styles.yaml_content_streamingIndicatorFixed : ''}`}
+                  >
+
+
+
+
+                )}
+
+
+          )}
+
+
+
+  )
+}
+
+export default AgentYaml
diff --git a/ui/src/components/agentYaml/styles.module.less b/ui/src/components/agentYaml/styles.module.less
new file mode 100644
index 00000000..3a7ad7b6
--- /dev/null
+++ b/ui/src/components/agentYaml/styles.module.less
@@ -0,0 +1,739 @@
+// Styles for the AgentYaml viewer: a dark highlighted-code pane plus the
+// purple AI "diagnosis" side panel, with a bottom drag handle for resizing.
+// NOTE(review): one closing brace had been lost in extraction (the code-pane
+// section left .fullScreenConatiner unclosed); it is restored below — verify
+// the nesting against the original commit before applying.
+.yaml_content {
+  height: 100%;
+  overflow-y: auto;
+  border-radius: 8px;
+  position: relative;
+  transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+  display: flex;
+  gap: 16px;
+  background: #fff;
+
+  // NOTE(review): class name is misspelled ('Conatiner' → 'Container');
+  // keep in sync with the TSX usage if ever renamed.
+  .fullScreenConatiner {
+    width: 100%;
+
+    // The highlighted YAML pane.
+    .yaml_container {
+      height: 100%;
+      position: relative;
+      flex: 1;
+
+      &:hover {
+        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
+        transform: translateY(-1px);
+        border-radius: 8px;
+
+        .copy {
+          opacity: 1;
+          transform: scale(1.02);
+        }
+      }
+
+      // Floating copy-to-clipboard button, top-right of the code pane.
+      .copy {
+        position: absolute;
+        top: 16px;
+        right: 24px;
+        z-index: 10;
+        opacity: 0.95;
+        transform: translateY(0);
+        transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+        // Gradient halo revealed on hover (see &:hover below).
+        &::before {
+          content: '';
+          position: absolute;
+          top: -2px;
+          left: -2px;
+          right: -2px;
+          bottom: -2px;
+          background: linear-gradient(45deg, #7cdce7, #7ec699);
+          border-radius: 6px;
+          z-index: -1;
+          opacity: 0;
+          transition: opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+        }
+
+        :global {
+          .ant-btn {
+            background: rgba(30, 30, 30, 0.95);
+            color: #7cdce7;
+            border: 1px solid rgba(124, 220, 231, 0.3);
+            height: 28px;
+            font-weight: 500;
+            font-family: 'JetBrains Mono', monospace;
+            letter-spacing: 0.5px;
+            backdrop-filter: blur(4px);
+            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+            box-shadow: 0 0 2px rgba(20, 20, 20, 0.3);
+            border-radius: 4px;
+            display: flex;
+            align-items: center;
+            gap: 6px;
+
+            .anticon {
+              font-size: 14px;
+              transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+            }
+
+            &:hover {
+              color: #fff;
+              border-color: rgba(124, 220, 231, 0.4);
+              transform: translateY(-1px);
+              box-shadow: 0 0 3px rgba(20, 20, 20, 0.4);
+              background: rgba(35, 35, 35, 0.95);
+
+              .anticon {
+                transform: scale(1.1);
+              }
+            }
+
+            &:active {
+              transform: translateY(0);
+              box-shadow: 0 0 1px rgba(20, 20, 20, 0.3);
+              background: rgba(28, 28, 28, 0.95);
+            }
+          }
+        }
+
+        &:hover {
+          &::before {
+            opacity: 0.1;
+          }
+        }
+      }
+
+      // Animated sparkle icon used on the AI button.
+      .magicWand {
+        font-size: 16px;
+        line-height: 1;
+        display: inline-block;
+        transform: translateY(1px);
+        filter: drop-shadow(0 0 4px rgba(147, 112, 219, 0.4));
+        animation: sparkle 1.5s ease-in-out infinite;
+      }
+
+      // Dark monospace code surface holding the highlighted YAML.
+      .yaml_box {
+        width: 100%;
+        height: 100%;
+        padding: 16px 20px;
+        overflow-y: auto;
+        font-family: 'JetBrains Mono', 'Fira Code', Menlo, Monaco, Consolas,
+          'Courier New', monospace;
+        font-size: 13px;
+        line-height: 1.6;
+        color: #e0e0e0;
+        word-break: break-all;
+        white-space: pre-wrap;
+        background-color: #1e1e1e;
+        border-radius: 8px;
+        box-sizing: border-box;
+        transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+        &::-webkit-scrollbar {
+          width: 8px;
+          height: 8px;
+        }
+
+        &::-webkit-scrollbar-track {
+          background: rgba(147, 112, 219, 0.05);
+          border-radius: 4px;
+        }
+
+        &::-webkit-scrollbar-thumb {
+          background: rgba(147, 112, 219, 0.2);
+          border-radius: 4px;
+          border: 2px solid transparent;
+          background-clip: padding-box;
+
+          &:hover {
+            background: rgba(147, 112, 219, 0.3);
+            border: 2px solid transparent;
+            background-clip: padding-box;
+          }
+        }
+
+        // Token colors overriding the imported lightfair theme.
+        :global {
+          .hljs {
+            background: transparent;
+            padding: 0;
+            transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+            &-attr {
+              color: #7cdce7;
+              transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+              &:hover {
+                color: lighten(#7cdce7, 10%);
+              }
+            }
+
+            &-string {
+              color: #7ec699;
+              transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+              &:hover {
+                color: lighten(#7ec699, 10%);
+              }
+            }
+
+            &-number {
+              color: #f08d49;
+              transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+              &:hover {
+                color: lighten(#f08d49, 10%);
+              }
+            }
+
+            &-boolean {
+              color: #cc99cd;
+              transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+              &:hover {
+                color: lighten(#cc99cd, 10%);
+              }
+            }
+
+            &-null {
+              color: #cc99cd;
+              transition: color 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+              &:hover {
+                color: lighten(#cc99cd, 10%);
+              }
+            }
+          }
+        }
+      }
+    }
+    // NOTE(review): restored closing brace for .fullScreenConatiner — the
+    // extracted source had one fewer '}' here than the nesting requires.
+  }
+
+  // Right-hand AI diagnosis panel (purple theme).
+  .yaml_content_diagnosisPanel {
+    box-sizing: border-box;
+    width: 400px;
+    background: #2b1d3c;
+    border-radius: 12px;
+    display: flex;
+    flex-direction: column;
+    backdrop-filter: blur(8px);
+    z-index: 10;
+    animation: slideIn 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+    .yaml_content_diagnosisHeader {
+      display: flex;
+      justify-content: space-between;
+      align-items: center;
+      padding: 12px;
+      background: linear-gradient(
+        135deg,
+        rgba(147, 112, 219, 0.15) 0%,
+        rgba(43, 29, 60, 0.95) 100%
+      );
+      border-bottom: 1px solid rgba(147, 112, 219, 0.2);
+      border-radius: 12px 12px 0 0;
+      color: #e6e6fa;
+      font-size: 14px;
+      font-weight: 500;
+      backdrop-filter: blur(4px);
+
+      .yaml_content_diagnosisHeader_aiIcon {
+        width: 18px;
+        height: 18px;
+
+        img {
+          width: 100%;
+          height: 100%;
+        }
+      }
+
+      // Button that aborts the in-flight interpret stream.
+      .stopButton {
+        color: rgba(255, 255, 255, 0.45);
+        transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
+        width: 28px;
+        height: 28px;
+        display: flex;
+        align-items: center;
+        justify-content: center;
+        border-radius: 6px;
+        margin-right: 8px;
+        position: relative;
+        overflow: hidden;
+
+        &::before {
+          content: '';
+          position: absolute;
+          top: 0;
+          left: 0;
+          right: 0;
+          bottom: 0;
+          background: rgba(255, 77, 79, 0);
+          transition: background 0.3s ease;
+          z-index: 0;
+        }
+
+        :global(.anticon) {
+          font-size: 16px;
+          transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+          position: relative;
+          z-index: 1;
+        }
+
+        &:hover {
+          color: #ff4d4f;
+          transform: scale(1.05);
+
+          &::before {
+            background: rgba(255, 77, 79, 0.15);
+          }
+
+          :global(.anticon) {
+            transform: rotate(180deg) scale(1.1);
+          }
+        }
+
+        &:active {
+          transform: scale(0.95);
+
+          &::before {
+            background: rgba(255, 77, 79, 0.25);
+          }
+        }
+      }
+
+      .anticon {
+        margin-right: 8px;
+        font-size: 16px;
+        color: #9370db;
+        text-shadow: 0 0 8px rgba(147, 112, 219, 0.4);
+      }
+
+      button {
+        color: rgba(230, 230, 250, 0.85);
+        transition: all 0.3s ease;
+        border-radius: 6px;
+        width: 28px;
+        height: 28px;
+        display: flex;
+        align-items: center;
+        justify-content: center;
+
+        &:hover {
+          color: #fff;
+          background: rgba(147, 112, 219, 0.2);
+          transform: scale(1.05);
+        }
+
+        &:active {
+          transform: scale(0.95);
+        }
+      }
+    }
+
+    // Scrollable body of the diagnosis panel.
+    .yaml_content_diagnosisBody {
+      position: relative;
+      width: 400px;
+      overflow-y: auto;
+      height: 100%;
+      box-sizing: border-box;
+      border-radius: 0 0 12px 12px;
+
+      &::-webkit-scrollbar {
+        width: 8px;
+        height: 8px;
+      }
+
+      &::-webkit-scrollbar-track {
+        background: rgba(147, 112, 219, 0.05);
+        border-radius: 4px;
+      }
+
+      &::-webkit-scrollbar-thumb {
+        background: rgba(147, 112, 219, 0.2);
+        border-radius: 4px;
+        border: 2px solid transparent;
+        background-clip: padding-box;
+
+        &:hover {
+          background: rgba(147, 112, 219, 0.3);
+          border: 2px solid transparent;
+          background-clip: padding-box;
+        }
+      }
+
+      // Rendered interpretation text (plain + markdown typography).
+      .yaml_content_diagnosisContent {
+        color: #d4d4d4;
+        background: #2b1d3c;
+        border-radius: 0 0 12px 12px;
+        padding: 16px;
+        font-size: 14px;
+        line-height: 1.6;
+        box-sizing: border-box;
+        word-wrap: break-word;
+
+        .yaml_content_diagnosisLoading {
+          height: 100%;
+          display: flex;
+          flex-direction: column;
+          align-items: center;
+          justify-content: center;
+          gap: 16px;
+          color: rgba(230, 230, 250, 0.85);
+
+          :global(.ant-spin) {
+            .ant-spin-dot-item {
+              background-color: #9370db;
+            }
+          }
+
+          p {
+            font-size: 14px;
+            color: rgba(230, 230, 250, 0.85);
+          }
+        }
+
+        h1,
+        h2,
+        h3 {
+          color: #e6e6fa;
+          margin-bottom: 16px;
+          font-weight: 600;
+          letter-spacing: -0.01em;
+        }
+
+        h1 {
+          font-size: 20px;
+        }
+
+        h2 {
+          font-size: 18px;
+          margin-top: 24px;
+          padding-bottom: 8px;
+          border-bottom: 1px solid rgba(147, 112, 219, 0.2);
+        }
+
+        h3 {
+          font-size: 16px;
+          margin-top: 20px;
+        }
+
+        p {
+          margin-bottom: 16px;
+          line-height: 1.7;
+        }
+
+        ul,
+        ol {
+          padding-left: 24px;
+          margin-bottom: 16px;
+        }
+
+        li {
+          margin-bottom: 8px;
+        }
+
+        code {
+          background: rgba(147, 112, 219, 0.1);
+          padding: 2px 6px;
+          border-radius: 4px;
+          font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
+          font-size: 13px;
+        }
+
+        pre {
+          background: rgba(20, 15, 30, 0.8);
+          padding: 16px;
+          border-radius: 8px;
+          margin-bottom: 16px;
+          overflow-x: auto;
+          border: 1px solid rgba(147, 112, 219, 0.1);
+
+          &::-webkit-scrollbar {
+            width: 8px;
+            height: 8px;
+          }
+
+          &::-webkit-scrollbar-track {
+            background: rgba(147, 112, 219, 0.05);
+            border-radius: 4px;
+          }
+
+          &::-webkit-scrollbar-thumb {
+            background: rgba(147, 112, 219, 0.2);
+            border-radius: 4px;
+            border: 2px solid transparent;
+            background-clip: padding-box;
+
+            &:hover {
+              background: rgba(147, 112, 219, 0.3);
+              border: 2px solid transparent;
+              background-clip: padding-box;
+            }
+          }
+
+          code {
+            background: none;
+            padding: 0;
+          }
+        }
+
+        // Theme overrides for react-markdown output.
+        :global {
+          .markdown-body {
+            color: #d4d4d4;
+            background: transparent;
+            padding: 16px;
+
+            h1,
+            h2,
+            h3,
+            h4,
+            h5,
+            h6 {
+              color: rgba(230, 230, 250, 0.95);
+              border-bottom: 1px solid rgba(147, 112, 219, 0.2);
+              margin-top: 24px;
+              margin-bottom: 16px;
+              font-weight: 600;
+              line-height: 1.25;
+            }
+
+            h1 {
+              font-size: 24px;
+            }
+
+            h2 {
+              font-size: 20px;
+            }
+
+            h3 {
+              font-size: 18px;
+            }
+
+            h4 {
+              font-size: 16px;
+            }
+
+            h5 {
+              font-size: 14px;
+            }
+
+            h6 {
+              font-size: 13px;
+            }
+
+            pre {
+              background: rgba(0, 0, 0, 0.4);
+              border: 1px solid rgba(147, 112, 219, 0.5);
+              border-radius: 6px;
+              padding: 12px;
+              margin: 12px 0;
+              overflow-x: auto;
+
+              code {
+                background: transparent;
+                padding: 0;
+                color: #f0e6ff;
+              }
+            }
+
+            code {
+              background: rgba(147, 112, 219, 0.2);
+              padding: 2px 6px;
+              border-radius: 4px;
+              color: #f0e6ff;
+              font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
+            }
+
+            ul,
+            ol {
+              padding-left: 24px;
+              margin: 8px 0;
+
+              li {
+                margin: 4px 0;
+              }
+            }
+
+            blockquote {
+              color: rgba(230, 230, 250, 0.85);
+              border-left: 4px solid rgba(147, 112, 219, 0.4);
+              background: rgba(147, 112, 219, 0.1);
+              margin: 16px 0;
+              padding: 12px 16px;
+              border-radius: 0 8px 8px 0;
+            }
+
+            a {
+              color: #9370db;
+              text-decoration: none;
+              transition: all 0.2s ease;
+              border-bottom: 1px solid transparent;
+
+              &:hover {
+                color: lighten(#9370db, 10%);
+                border-bottom-color: currentColor;
+              }
+            }
+
+            table {
+              border-collapse: separate;
+              border-spacing: 0;
+              width: 100%;
+              margin: 16px 0;
+              border-radius: 8px;
+              border: 1px solid rgba(147, 112, 219, 0.2);
+              overflow: hidden;
+
+              th,
+              td {
+                border: 1px solid rgba(147, 112, 219, 0.2);
+                padding: 12px;
+              }
+
+              th {
+                background: rgba(147, 112, 219, 0.1);
+                font-weight: 600;
+                text-align: left;
+              }
+
+              tr {
+                background-color: transparent;
+                transition: background-color 0.2s ease;
+
+                &:nth-child(2n) {
+                  background-color: rgba(147, 112, 219, 0.05);
+                }
+
+                &:hover {
+                  background-color: rgba(147, 112, 219, 0.1);
+                }
+              }
+            }
+
+            hr {
+              border: none;
+              height: 1px;
+              background: linear-gradient(
+                to right,
+                rgba(147, 112, 219, 0.1),
+                rgba(147, 112, 219, 0.4),
+                rgba(147, 112, 219, 0.1)
+              );
+              margin: 24px 0;
+            }
+          }
+        }
+      }
+
+      // Three pulsing dots shown while tokens stream in.
+      .yaml_content_streamingIndicator {
+        display: flex;
+        align-items: center;
+        gap: 4px;
+        padding: 16px;
+
+        .dot {
+          width: 6px;
+          height: 6px;
+          background-color: #4447c3;
+          border-radius: 50%;
+          opacity: 0.3;
+          animation: dotPulse 1.4s infinite;
+
+          &:nth-child(2) {
+            animation-delay: 0.2s;
+          }
+
+          &:nth-child(3) {
+            animation-delay: 0.4s;
+          }
+        }
+      }
+
+      // Variant pinned to the bottom edge while the panel is scrolled.
+      .yaml_content_streamingIndicatorFixed {
+        position: sticky;
+        bottom: 0;
+        left: 0;
+        right: 0;
+      }
+    }
+  }
+}
+
+// Drag handle injected by re-resizable (see handleClasses in the TSX).
+.resizeHandle {
+  position: absolute;
+  bottom: 0;
+  left: 0;
+  right: 0;
+  height: 6px;
+  cursor: row-resize;
+  transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+
+  &::after {
+    content: '';
+    position: absolute;
+    left: 50%;
+    top: 50%;
+    transform: translate(-50%, -50%) scaleX(0.6);
+    width: 60px;
+    height: 2px;
+    background: rgba(47, 84, 235, 0.1);
+    border-radius: 1px;
+    opacity: 0;
+    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+  }
+
+  &:hover {
+    background: rgba(47, 84, 235, 0.08);
+
+    &::after {
+      opacity: 1;
+      transform: translate(-50%, -50%) scaleX(1);
+      background: rgba(47, 84, 235, 0.3);
+    }
+  }
+
+  &:active {
+    background: rgba(47, 84, 235, 0.15);
+
+    &::after {
+      opacity: 1;
+      background: rgba(47, 84, 235, 0.4);
+      transform: translate(-50%, -50%) scaleX(1.2);
+      box-shadow: 0 0 10px rgba(47, 84, 235, 0.3);
+    }
+  }
+}
+
+@keyframes slideIn {
+  from {
+    opacity: 0;
+    transform: translateX(20px);
+  }
+
+  to {
+    opacity: 1;
+    transform: translateX(0);
+  }
+}
+
+@keyframes sparkle {
+  0%,
+  100% {
+    filter: drop-shadow(0 0 4px rgba(147, 112, 219, 0.4));
+    transform: translateY(1px) scale(1);
+  }
+
+  50% {
+    filter: drop-shadow(0 0 8px rgba(147, 112, 219, 0.6));
+    transform: translateY(1px) scale(1.1);
+  }
+}
+
+@keyframes dotPulse {
+  0% {
+    opacity: 0.3;
+  }
+
+  50% {
+    opacity: 1;
+  }
+
+  100% {
+    opacity: 0.3;
+  }
+}
diff --git a/ui/src/components/layout/index.tsx b/ui/src/components/layout/index.tsx
index 995e61c8..021cc96a 100644
--- a/ui/src/components/layout/index.tsx
+++ b/ui/src/components/layout/index.tsx
@@ -18,6 +18,7 @@ import {
   setGithubBadge,
   setIsUnsafeMode,
   setAIOptions,
+  setIsHighAvailability,
 } from '@/store/modules/globalSlice'
 import { useTranslation } from 'react-i18next'
 import showPng from '@/assets/show.png'
@@ -72,6 +73,7 @@ const LayoutPage = () => {
       dispatch(setVersionNumber(response?.Version))
       dispatch(setGithubBadge(response?.CoreOptions?.GithubBadge))
       dispatch(setAIOptions(response?.AIOptions))
+      dispatch(setIsHighAvailability(response?.CoreOptions?.HighAvailability))
     }
   }, [response, dispatch])
diff --git a/ui/src/pages/cluster/add/index.tsx b/ui/src/pages/cluster/add/index.tsx
index f9a5890f..c85102ba 100644
--- a/ui/src/pages/cluster/add/index.tsx
+++ b/ui/src/pages/cluster/add/index.tsx
@@ -1,7 +1,16 @@
 import React, { useState, useEffect } from 'react'
 import { useNavigate } from 'react-router-dom'
 import { ArrowLeftOutlined, UploadOutlined } from '@ant-design/icons'
-import { Form, Input, Space, Button, Upload, message, notification } from 'antd' +import { + Form, + Input, + Space, + Button, + Upload, + message, + notification, + Select, +} from 'antd' import type { UploadProps } from 'antd' import { useTranslation } from 'react-i18next' import { useSelector } from 'react-redux' @@ -13,11 +22,13 @@ import styles from './styles.module.less' const { TextArea } = Input +const { Option } = Select + const RegisterCluster = () => { const { t } = useTranslation() const [form] = Form.useForm() const navigate = useNavigate() - const { isReadOnlyMode, isUnsafeMode } = useSelector( + const { isReadOnlyMode, isUnsafeMode, isHighAvailability } = useSelector( (state: any) => state.globalSlice, ) const [yamlContent, setYamlContent] = useState('') @@ -82,6 +93,10 @@ const RegisterCluster = () => { const uploadProps: UploadProps = { disabled: isReadOnlyMode, name: 'file', + data: { + clusterMode: form.getFieldValue('clusterMode'), + clusterLevel: form.getFieldValue('clusterLevel'), + }, action: `${HOST}/rest-api/v1/cluster/config/file`, headers: { Authorization: isUnsafeMode @@ -161,6 +176,41 @@ const RegisterCluster = () => { >