diff --git a/cmd/build.go b/cmd/build.go index f335a1f..e768aff 100644 --- a/cmd/build.go +++ b/cmd/build.go @@ -10,13 +10,58 @@ import ( "github.com/spf13/cobra" ) +var ( + buildDockerfile string + buildTag string + buildContext string + buildNoCache bool + buildBuildArgs []string + buildMemSize int64 + buildVcpuCount int64 + buildFsSize int64 +) + // buildCmd represents the build command var buildCmd = &cobra.Command{ Use: "build", Short: "Build a rootfs image", Long: `Build a rootfs image according to the configuration in vers.toml and the Dockerfile in the current directory.`, RunE: func(cmd *cobra.Command, args []string) error { - // Load configuration from vers.toml + // If --dockerfile is specified, delegate to docker build with deprecation warning + if buildDockerfile != "" { + fmt.Fprintln(cmd.ErrOrStderr(), "โš ๏ธ DEPRECATED: 'vers build --dockerfile' is deprecated. Use 'vers docker build' instead.") + fmt.Fprintln(cmd.ErrOrStderr()) + + // Determine build context + ctxDir := buildContext + if ctxDir == "" { + ctxDir = "." 
+ } + + apiCtx, cancel := context.WithTimeout(context.Background(), application.Timeouts.BuildUpload) + defer cancel() + + req := handlers.DockerBuildReq{ + DockerfilePath: buildDockerfile, + BuildContext: ctxDir, + Tag: buildTag, + MemSizeMib: buildMemSize, + VcpuCount: buildVcpuCount, + FsSizeMib: buildFsSize, + NoCache: buildNoCache, + BuildArgs: buildBuildArgs, + } + + view, err := handlers.HandleDockerBuild(apiCtx, application, req) + if err != nil { + return err + } + + pres.RenderDockerBuild(application, view) + return nil + } + + // Original build behavior (rootfs from vers.toml) config, err := runconfig.Load() if err != nil { return fmt.Errorf("failed to load configuration: %w", err) @@ -44,5 +89,17 @@ func init() { // Add flags to override toml configuration buildCmd.Flags().String("rootfs", "", "Override rootfs name") - buildCmd.Flags().String("dockerfile", "", "Dockerfile path") + + // Dockerfile build flags (deprecated - use 'vers docker build' instead) + buildCmd.Flags().StringVar(&buildDockerfile, "dockerfile", "", "Path to Dockerfile (DEPRECATED: use 'vers docker build' instead)") + buildCmd.Flags().StringVarP(&buildTag, "tag", "t", "", "Tag for the snapshot (e.g., myapp:latest)") + buildCmd.Flags().StringVarP(&buildContext, "context", "c", "", "Build context directory (default: current directory)") + buildCmd.Flags().BoolVar(&buildNoCache, "no-cache", false, "Do not use cache (no-op, for compatibility)") + buildCmd.Flags().StringArrayVar(&buildBuildArgs, "build-arg", nil, "Set build-time variables") + buildCmd.Flags().Int64Var(&buildMemSize, "mem", 0, "Memory size in MiB (default: 1024)") + buildCmd.Flags().Int64Var(&buildVcpuCount, "vcpu", 0, "Number of vCPUs (default: 2)") + buildCmd.Flags().Int64Var(&buildFsSize, "fs-size", 0, "Filesystem size in MiB (default: 4096)") + + // Mark dockerfile flag as deprecated + buildCmd.Flags().MarkDeprecated("dockerfile", "use 'vers docker build -f ' instead") } diff --git a/cmd/config.go b/cmd/config.go index 
87f2306..a9451d3 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -27,10 +27,6 @@ func applyFlagOverrides(cmd *cobra.Command, config *runconfig.Config) { config.Kernel.Name = kernel } - if dockerfile, _ := cmd.Flags().GetString("dockerfile"); dockerfile != "" { - config.Builder.Dockerfile = dockerfile - } - // Override filesystem size if flag is set if fsVm, _ := cmd.Flags().GetInt64("fs-size-vm"); fsVm > 0 { config.Machine.FsSizeVmMib = fsVm diff --git a/cmd/docker.go b/cmd/docker.go new file mode 100644 index 0000000..9456cd4 --- /dev/null +++ b/cmd/docker.go @@ -0,0 +1,303 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/hdresearch/vers-cli/internal/handlers" + pres "github.com/hdresearch/vers-cli/internal/presenters" + "github.com/spf13/cobra" +) + +var ( + dockerMemSize int64 + dockerVcpuCount int64 + dockerFsSize int64 + dockerVMAlias string + dockerDetach bool + dockerPorts []string + dockerEnvVars []string + dockerInteractive bool + dockerBuildContext string + dockerBuildTag string + dockerBuildNoCache bool + dockerBuildArgs []string + dockerDockerfile string + composeProjectName string + composeServices []string + composeNoDeps bool + composeFile string +) + +// dockerCmd represents the docker command group +var dockerCmd = &cobra.Command{ + Use: "docker", + Short: "Docker compatibility commands for Vers VMs", + Long: `Run Docker-style commands that execute on Vers VMs instead of containers. + +This provides a familiar Docker CLI experience while leveraging Vers VM capabilities: + - Full VM isolation (not containers) + - Persistent state with snapshots + - Branch and restore + - Full SSH access + +Similar to how 'uv pip' provides pip compatibility or 'jj git' provides git compatibility, +'vers docker' provides Docker CLI compatibility on top of Vers VMs. 
+ +Examples: + vers docker run ./Dockerfile # Run Dockerfile in a Vers VM + vers docker build -t myapp ./Dockerfile # Build and create a snapshot + vers docker compose up # Start a compose project`, +} + +// dockerRunCmd represents the docker run subcommand +var dockerRunCmd = &cobra.Command{ + Use: "run [dockerfile]", + Short: "Run a Dockerfile on a Vers VM", + Long: `Parse a Dockerfile and execute it on a Vers VM. + +This command: + 1. Creates a new Vers VM + 2. Parses the Dockerfile instructions + 3. Copies the build context to the VM + 4. Executes RUN commands + 5. Starts the application (CMD/ENTRYPOINT) + +The VM persists after the command completes, allowing you to: + - Connect via SSH (vers connect) + - Create snapshots (vers commit) + - Branch the state (vers branch) + +Note: FROM instructions specify the expected base environment but Vers VMs +use their own base image. Common tools (Node.js, Python, etc.) may need +to be installed via RUN commands. + +Examples: + vers docker run # Use ./Dockerfile, current dir as context + vers docker run ./Dockerfile # Specify Dockerfile + vers docker run -f ./Dockerfile.dev # Use alternate Dockerfile + vers docker run -c ./app ./Dockerfile # Custom build context + vers docker run -d ./Dockerfile # Run detached + vers docker run -N myapp ./Dockerfile # Set VM alias + vers docker run --mem 2048 ./Dockerfile # Custom memory (2GB)`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + dockerfilePath := dockerDockerfile + if dockerfilePath == "" && len(args) > 0 { + dockerfilePath = args[0] + } + + // Use longer timeout for docker operations (same as build upload - 10 min) + apiCtx, cancel := context.WithTimeout(context.Background(), application.Timeouts.BuildUpload) + defer cancel() + + req := handlers.DockerRunReq{ + DockerfilePath: dockerfilePath, + BuildContext: dockerBuildContext, + MemSizeMib: dockerMemSize, + VcpuCount: dockerVcpuCount, + FsSizeMib: dockerFsSize, + VMAlias: 
dockerVMAlias, + Detach: dockerDetach, + PortMappings: dockerPorts, + EnvVars: dockerEnvVars, + Interactive: dockerInteractive, + } + + view, err := handlers.HandleDockerRun(apiCtx, application, req) + if err != nil { + return err + } + + pres.RenderDockerRun(application, view) + return nil + }, +} + +// dockerBuildCmd represents the docker build subcommand (creates a snapshot) +var dockerBuildCmd = &cobra.Command{ + Use: "build [context]", + Short: "Build a Dockerfile as a Vers snapshot", + Long: `Parse a Dockerfile and create a Vers snapshot (commit) from it. + +This is similar to 'docker build' - it creates a reusable image that can +be instantiated later. In Vers terms, this creates a committed VM snapshot. + +The command: + 1. Creates a temporary build VM + 2. Copies the build context + 3. Executes all RUN instructions + 4. Creates a commit (snapshot) + 5. Deletes the build VM (only the snapshot remains) + +Examples: + vers docker build . # Build from current directory + vers docker build -t myapp . # Tag the image + vers docker build -f Dockerfile.prod . # Use specific Dockerfile + vers docker build --dockerfile ./Dockerfile . # Explicit Dockerfile path`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + // Build context is the argument (default: current dir) + buildContext := "." 
+ if len(args) > 0 { + buildContext = args[0] + } + + // Use longer timeout for docker operations + apiCtx, cancel := context.WithTimeout(context.Background(), application.Timeouts.BuildUpload) + defer cancel() + + req := handlers.DockerBuildReq{ + DockerfilePath: dockerDockerfile, + BuildContext: buildContext, + Tag: dockerBuildTag, + MemSizeMib: dockerMemSize, + VcpuCount: dockerVcpuCount, + FsSizeMib: dockerFsSize, + NoCache: dockerBuildNoCache, + BuildArgs: dockerBuildArgs, + } + + view, err := handlers.HandleDockerBuild(apiCtx, application, req) + if err != nil { + return err + } + + pres.RenderDockerBuild(application, view) + return nil + }, +} + +// dockerComposeCmd represents the docker compose command group +var dockerComposeCmd = &cobra.Command{ + Use: "compose", + Short: "Docker Compose compatibility commands", + Long: `Run Docker Compose-style commands that execute on Vers VMs. + +Start multi-service applications defined in docker-compose.yml files. +Each service runs in its own Vers VM, providing full isolation. + +Examples: + vers docker compose up # Start all services + vers docker compose up -d # Start detached + vers docker compose up web api # Start specific services`, +} + +// dockerComposeUpCmd represents the docker compose up subcommand +var dockerComposeUpCmd = &cobra.Command{ + Use: "up [services...]", + Short: "Create and start services defined in docker-compose.yml", + Long: `Create and start all services defined in docker-compose.yml. + +This command: + 1. Parses the compose file + 2. Sorts services by dependency order + 3. Creates a Vers VM for each service + 4. Copies build contexts and runs setup commands + 5. 
Starts applications (if -d/--detach is specified) + +Each service gets its own VM with an alias: _ + +Examples: + vers docker compose up # Start all services + vers docker compose up -d # Start in detached mode + vers docker compose up web db # Start only web and db + vers docker compose up --no-deps web # Start web without dependencies + vers docker compose up -f compose.yml # Use specific compose file`, + RunE: func(cmd *cobra.Command, args []string) error { + apiCtx, cancel := context.WithTimeout(context.Background(), application.Timeouts.BuildUpload) + defer cancel() + + req := handlers.DockerComposeUpReq{ + ComposePath: composeFile, + ProjectName: composeProjectName, + Detach: dockerDetach, + Services: args, // Services to start + NoDeps: composeNoDeps, + EnvVars: dockerEnvVars, + } + + view, err := handlers.HandleDockerComposeUp(apiCtx, application, req) + if err != nil { + return err + } + + pres.RenderDockerComposeUp(application, view) + return nil + }, +} + +// dockerComposePsCmd represents the docker compose ps subcommand +var dockerComposePsCmd = &cobra.Command{ + Use: "ps", + Short: "List running compose services", + Long: `List all VMs that are part of a compose project. + +Examples: + vers docker compose ps + vers docker compose ps -p myproject`, + RunE: func(cmd *cobra.Command, args []string) error { + // TODO: Implement by filtering VMs by project prefix alias + return fmt.Errorf("docker compose ps is not yet implemented - use 'vers status' to list all VMs") + }, +} + +// dockerComposeDownCmd represents the docker compose down subcommand +var dockerComposeDownCmd = &cobra.Command{ + Use: "down", + Short: "Stop and remove compose services", + Long: `Stop and remove all VMs that are part of a compose project. 
+ +Examples: + vers docker compose down + vers docker compose down -p myproject`, + RunE: func(cmd *cobra.Command, args []string) error { + // TODO: Implement by filtering and killing VMs by project prefix + return fmt.Errorf("docker compose down is not yet implemented - use 'vers kill ' to stop individual services") + }, +} + +func init() { + rootCmd.AddCommand(dockerCmd) + dockerCmd.AddCommand(dockerRunCmd) + dockerCmd.AddCommand(dockerBuildCmd) + dockerCmd.AddCommand(dockerComposeCmd) + + // Compose subcommands + dockerComposeCmd.AddCommand(dockerComposeUpCmd) + dockerComposeCmd.AddCommand(dockerComposePsCmd) + dockerComposeCmd.AddCommand(dockerComposeDownCmd) + + // Common flags for docker run + dockerRunCmd.Flags().StringVarP(&dockerBuildContext, "context", "c", "", "Build context directory (default: Dockerfile directory)") + dockerRunCmd.Flags().StringVarP(&dockerDockerfile, "file", "f", "", "Path to Dockerfile (default: ./Dockerfile)") + dockerRunCmd.Flags().Int64Var(&dockerMemSize, "mem", 0, "Memory size in MiB (default: 1024)") + dockerRunCmd.Flags().Int64Var(&dockerVcpuCount, "vcpu", 0, "Number of vCPUs (default: 2)") + dockerRunCmd.Flags().Int64Var(&dockerFsSize, "fs-size", 0, "Filesystem size in MiB (default: 4096)") + dockerRunCmd.Flags().StringVarP(&dockerVMAlias, "name", "N", "", "Set an alias for the VM") + dockerRunCmd.Flags().BoolVarP(&dockerDetach, "detach", "d", false, "Run in detached mode") + dockerRunCmd.Flags().StringArrayVarP(&dockerPorts, "publish", "p", nil, "Publish port (host:container)") + dockerRunCmd.Flags().StringArrayVarP(&dockerEnvVars, "env", "e", nil, "Set environment variables") + dockerRunCmd.Flags().BoolVarP(&dockerInteractive, "interactive", "i", false, "Run interactively") + + // Flags for docker build + dockerBuildCmd.Flags().StringVarP(&dockerBuildTag, "tag", "t", "", "Tag for the snapshot (e.g., myapp:latest)") + dockerBuildCmd.Flags().StringVarP(&dockerDockerfile, "file", "f", "", "Path to Dockerfile (default: 
./Dockerfile)") + dockerBuildCmd.Flags().StringVar(&dockerDockerfile, "dockerfile", "", "Path to Dockerfile (alias for -f)") + dockerBuildCmd.Flags().BoolVar(&dockerBuildNoCache, "no-cache", false, "Do not use cache (no-op, for compatibility)") + dockerBuildCmd.Flags().StringArrayVar(&dockerBuildArgs, "build-arg", nil, "Set build-time variables") + dockerBuildCmd.Flags().Int64Var(&dockerMemSize, "mem", 0, "Memory size in MiB (default: 1024)") + dockerBuildCmd.Flags().Int64Var(&dockerVcpuCount, "vcpu", 0, "Number of vCPUs (default: 2)") + dockerBuildCmd.Flags().Int64Var(&dockerFsSize, "fs-size", 0, "Filesystem size in MiB (default: 4096)") + + // Flags for docker compose up + dockerComposeUpCmd.Flags().StringVarP(&composeFile, "file", "f", "", "Compose file path (default: docker-compose.yml)") + dockerComposeUpCmd.Flags().StringVarP(&composeProjectName, "project-name", "p", "", "Project name (default: directory name)") + dockerComposeUpCmd.Flags().BoolVarP(&dockerDetach, "detach", "d", false, "Run in detached mode") + dockerComposeUpCmd.Flags().BoolVar(&composeNoDeps, "no-deps", false, "Don't start linked services") + dockerComposeUpCmd.Flags().StringArrayVarP(&dockerEnvVars, "env", "e", nil, "Set environment variables") + + // Flags for docker compose ps/down + dockerComposePsCmd.Flags().StringVarP(&composeProjectName, "project-name", "p", "", "Project name") + dockerComposeDownCmd.Flags().StringVarP(&composeProjectName, "project-name", "p", "", "Project name") +} diff --git a/cmd/execute.go b/cmd/execute.go index 031566f..26a5a1d 100644 --- a/cmd/execute.go +++ b/cmd/execute.go @@ -2,6 +2,7 @@ package cmd import ( "context" + "time" "github.com/hdresearch/vers-cli/internal/handlers" pres "github.com/hdresearch/vers-cli/internal/presenters" @@ -16,7 +17,16 @@ var executeCmd = &cobra.Command{ If no VM is specified, the current HEAD VM is used.`, Args: cobra.MinimumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - apiCtx, cancel := 
context.WithTimeout(context.Background(), application.Timeouts.APIMedium) + // Get timeout from flag, default to APIMedium (30s) + timeoutSec, _ := cmd.Flags().GetInt("timeout") + var timeout time.Duration + if timeoutSec > 0 { + timeout = time.Duration(timeoutSec) * time.Second + } else { + timeout = application.Timeouts.APIMedium + } + + apiCtx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() // Determine if the first arg is a VM target or part of the command. @@ -48,4 +58,5 @@ If no VM is specified, the current HEAD VM is used.`, func init() { rootCmd.AddCommand(executeCmd) executeCmd.Flags().String("host", "", "Specify the host IP to connect to (overrides default)") + executeCmd.Flags().IntP("timeout", "t", 0, "Timeout in seconds (default: 30s, use 0 for no limit)") } diff --git a/go.mod b/go.mod index ffcf24c..254ced5 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/spf13/cobra v1.9.1 golang.org/x/crypto v0.46.0 golang.org/x/term v0.38.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( diff --git a/go.sum b/go.sum index 23c07f9..5ccab1e 100644 --- a/go.sum +++ b/go.sum @@ -31,12 +31,6 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.2.3 h1:dkP3B96OtZKKFvdrUSaDkL+YDx8Uw9uC4Y+eukpCnmM= github.com/google/jsonschema-go v0.2.3/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= -github.com/hdresearch/vers-sdk-go v0.1.0-alpha.26 h1:1v+kyEDBeMLpKXUvtcgrD50xvKu45EmAYehMnjqzDc0= -github.com/hdresearch/vers-sdk-go v0.1.0-alpha.26/go.mod h1:aJoQGYzJHXdbj7uhCekUZaxbMu+XhVMOCtVQEdA0NFI= -github.com/hdresearch/vers-sdk-go v0.1.0-alpha.26.0.20260109230209-26478ae5d25f h1:4xYwGPfYc5d069HtGGsPM6Sl0GXpoyKRJNaNwhxJIJ0= -github.com/hdresearch/vers-sdk-go v0.1.0-alpha.26.0.20260109230209-26478ae5d25f/go.mod h1:aJoQGYzJHXdbj7uhCekUZaxbMu+XhVMOCtVQEdA0NFI= 
-github.com/hdresearch/vers-sdk-go v0.1.0-alpha.27.0.20260117042733-a0128fdb7faa h1:2vmL2H67lOy/YNius3b7asFyT8z0wx+whkT2XvLYu1A= -github.com/hdresearch/vers-sdk-go v0.1.0-alpha.27.0.20260117042733-a0128fdb7faa/go.mod h1:aJoQGYzJHXdbj7uhCekUZaxbMu+XhVMOCtVQEdA0NFI= github.com/hdresearch/vers-sdk-go v0.1.0-alpha.27.0.20260119153316-fe17269b5c0d h1:UflS9l7pyCL8EbsxRGG+6KWxzsMBSxwD3m9tSxh7utw= github.com/hdresearch/vers-sdk-go v0.1.0-alpha.27.0.20260119153316-fe17269b5c0d/go.mod h1:aJoQGYzJHXdbj7uhCekUZaxbMu+XhVMOCtVQEdA0NFI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -110,6 +104,7 @@ golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/docker/compose.go b/internal/docker/compose.go new file mode 100644 index 0000000..2a2581b --- /dev/null +++ b/internal/docker/compose.go @@ -0,0 +1,316 @@ +package docker + +import ( + "fmt" + "os" + "path/filepath" + "sort" + + "gopkg.in/yaml.v3" +) + +// ComposeFile represents a docker-compose.yml file +type ComposeFile struct { + Version string `yaml:"version"` + Services map[string]ComposeService `yaml:"services"` +} + +// ComposeService represents a service in docker-compose.yml +type ComposeService struct { + Image string `yaml:"image"` + Build *ComposeBuild `yaml:"build"` + Ports []string `yaml:"ports"` + Environment interface{} 
`yaml:"environment"` // Can be []string or map[string]string + DependsOn []string `yaml:"depends_on"` + Command interface{} `yaml:"command"` // Can be string or []string + Volumes []string `yaml:"volumes"` + WorkingDir string `yaml:"working_dir"` + EnvFile interface{} `yaml:"env_file"` // Can be string or []string + Labels map[string]string `yaml:"labels"` + MemLimit string `yaml:"mem_limit"` + CPUs float64 `yaml:"cpus"` +} + +// ComposeBuild represents the build configuration for a service +type ComposeBuild struct { + Context string `yaml:"context"` + Dockerfile string `yaml:"dockerfile"` +} + +// UnmarshalYAML handles the case where build can be a string or struct +func (b *ComposeBuild) UnmarshalYAML(value *yaml.Node) error { + // Try string first (short form: build: ./path) + var strVal string + if err := value.Decode(&strVal); err == nil { + b.Context = strVal + b.Dockerfile = "Dockerfile" + return nil + } + + // Otherwise try struct form + type buildAlias ComposeBuild + var build buildAlias + if err := value.Decode(&build); err != nil { + return err + } + *b = ComposeBuild(build) + + // Set defaults + if b.Dockerfile == "" { + b.Dockerfile = "Dockerfile" + } + + return nil +} + +// ParsedService contains a fully resolved service configuration +type ParsedService struct { + Name string + Dockerfile string // Path to Dockerfile (if build context) + BuildContext string // Build context directory + Image string // Base image (if no build) + Ports []string // Port mappings + Environment map[string]string // Environment variables + DependsOn []string // Services this depends on + Command []string // Command to run + WorkingDir string // Working directory + MemSizeMib int64 // Memory in MiB + VcpuCount int64 // Number of vCPUs +} + +// ParseComposeFile reads and parses a docker-compose.yml file +func ParseComposeFile(path string) (*ComposeFile, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read compose file: %w", 
err) + } + + var compose ComposeFile + if err := yaml.Unmarshal(data, &compose); err != nil { + return nil, fmt.Errorf("failed to parse compose file: %w", err) + } + + if len(compose.Services) == 0 { + return nil, fmt.Errorf("compose file has no services defined") + } + + return &compose, nil +} + +// ParseServices resolves all services in the compose file to ParsedService structs +func (c *ComposeFile) ParseServices(baseDir string) ([]ParsedService, error) { + var services []ParsedService + + for name, svc := range c.Services { + parsed := ParsedService{ + Name: name, + Image: svc.Image, + Ports: svc.Ports, + DependsOn: svc.DependsOn, + WorkingDir: svc.WorkingDir, + MemSizeMib: 1024, // Default + VcpuCount: 2, // Default + } + + // Parse build configuration + if svc.Build != nil { + buildContext := svc.Build.Context + if !filepath.IsAbs(buildContext) { + buildContext = filepath.Join(baseDir, buildContext) + } + parsed.BuildContext = buildContext + parsed.Dockerfile = filepath.Join(buildContext, svc.Build.Dockerfile) + } + + // Parse environment variables + parsed.Environment = parseEnvironment(svc.Environment) + + // Parse command + parsed.Command = parseCommand(svc.Command) + + // Parse memory limit (e.g., "512m", "1g") + if svc.MemLimit != "" { + parsed.MemSizeMib = parseMemoryLimit(svc.MemLimit) + } + + // Parse CPU limit + if svc.CPUs > 0 { + parsed.VcpuCount = int64(svc.CPUs) + if parsed.VcpuCount < 1 { + parsed.VcpuCount = 1 + } + } + + services = append(services, parsed) + } + + // Sort by dependency order (topological sort) + sorted, err := topologicalSort(services) + if err != nil { + return nil, err + } + + return sorted, nil +} + +// parseEnvironment handles the various formats for environment variables +func parseEnvironment(env interface{}) map[string]string { + result := make(map[string]string) + + switch e := env.(type) { + case []interface{}: + // Format: ["KEY=value", "KEY2=value2"] + for _, item := range e { + if s, ok := item.(string); ok { + if 
idx := indexOf(s, '='); idx > 0 { + result[s[:idx]] = s[idx+1:] + } else { + // Just key, value from host env + result[s] = os.Getenv(s) + } + } + } + case map[string]interface{}: + // Format: {KEY: value, KEY2: value2} + for k, v := range e { + if v == nil { + result[k] = os.Getenv(k) + } else { + result[k] = fmt.Sprintf("%v", v) + } + } + } + + return result +} + +// parseCommand handles the various formats for command +func parseCommand(cmd interface{}) []string { + switch c := cmd.(type) { + case string: + return []string{c} + case []interface{}: + var result []string + for _, item := range c { + if s, ok := item.(string); ok { + result = append(result, s) + } + } + return result + } + return nil +} + +// parseMemoryLimit parses memory limits like "512m", "1g", "1024" +func parseMemoryLimit(limit string) int64 { + if len(limit) == 0 { + return 1024 + } + + var multiplier int64 = 1 + suffix := limit[len(limit)-1] + numStr := limit + + switch suffix { + case 'g', 'G': + multiplier = 1024 + numStr = limit[:len(limit)-1] + case 'm', 'M': + multiplier = 1 + numStr = limit[:len(limit)-1] + case 'k', 'K': + multiplier = 1 // Round up to 1 MiB minimum + numStr = limit[:len(limit)-1] + } + + var num int64 + fmt.Sscanf(numStr, "%d", &num) + result := num * multiplier + if result < 256 { + return 256 // Minimum + } + return result +} + +// indexOf finds the index of a rune in a string +func indexOf(s string, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +// topologicalSort sorts services by dependency order +func topologicalSort(services []ParsedService) ([]ParsedService, error) { + // Build adjacency list + serviceMap := make(map[string]*ParsedService) + for i := range services { + serviceMap[services[i].Name] = &services[i] + } + + // Kahn's algorithm + inDegree := make(map[string]int) + for _, svc := range services { + if _, exists := inDegree[svc.Name]; !exists { + inDegree[svc.Name] = 0 + } + for _, dep := range svc.DependsOn 
{ + inDegree[svc.Name]++ + if _, exists := inDegree[dep]; !exists { + inDegree[dep] = 0 + } + } + } + + // Find all nodes with no incoming edges + var queue []string + for name, degree := range inDegree { + if degree == 0 { + queue = append(queue, name) + } + } + sort.Strings(queue) // Stable sort for determinism + + var result []ParsedService + for len(queue) > 0 { + // Pop first element + name := queue[0] + queue = queue[1:] + + if svc, exists := serviceMap[name]; exists { + result = append(result, *svc) + } + + // Decrease in-degree for dependents + for _, svc := range services { + for _, dep := range svc.DependsOn { + if dep == name { + inDegree[svc.Name]-- + if inDegree[svc.Name] == 0 { + queue = append(queue, svc.Name) + sort.Strings(queue) + } + } + } + } + } + + // Check for cycles + if len(result) != len(services) { + return nil, fmt.Errorf("circular dependency detected in compose services") + } + + return result, nil +} + +// GetServiceNames returns all service names +func (c *ComposeFile) GetServiceNames() []string { + var names []string + for name := range c.Services { + names = append(names, name) + } + sort.Strings(names) + return names +} diff --git a/internal/docker/compose_executor.go b/internal/docker/compose_executor.go new file mode 100644 index 0000000..0066d64 --- /dev/null +++ b/internal/docker/compose_executor.go @@ -0,0 +1,414 @@ +package docker + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/hdresearch/vers-cli/internal/app" + vmSvc "github.com/hdresearch/vers-cli/internal/services/vm" + sshutil "github.com/hdresearch/vers-cli/internal/ssh" + "github.com/hdresearch/vers-cli/internal/utils" + vers "github.com/hdresearch/vers-sdk-go" +) + +// ComposeConfig holds configuration for running a compose file +type ComposeConfig struct { + ComposePath string // Path to docker-compose.yml + ProjectName string // Project name (default: directory name) + Detach bool // Run in detached mode 
+ Services []string // Specific services to run (empty = all) + NoDeps bool // Don't start dependencies + EnvVars []string // Additional environment variables +} + +// ServiceResult contains the result of starting a single service +type ServiceResult struct { + Name string + VMID string + VMAlias string + Ports []string + Running bool + SetupComplete bool + Error error +} + +// ComposeResult contains the result of running docker compose up +type ComposeResult struct { + ProjectName string + Services []ServiceResult + TotalServices int +} + +// ComposeExecutor handles running docker-compose files on Vers VMs +type ComposeExecutor struct { + app *app.App +} + +// NewComposeExecutor creates a new compose executor +func NewComposeExecutor(app *app.App) *ComposeExecutor { + return &ComposeExecutor{app: app} +} + +// Up starts all services in the compose file +func (e *ComposeExecutor) Up(ctx context.Context, cfg ComposeConfig, stdout, stderr io.Writer) (*ComposeResult, error) { + result := &ComposeResult{ + ProjectName: cfg.ProjectName, + } + + // Parse the compose file + compose, err := ParseComposeFile(cfg.ComposePath) + if err != nil { + return nil, fmt.Errorf("failed to parse compose file: %w", err) + } + + // Get base directory for resolving relative paths + baseDir := filepath.Dir(cfg.ComposePath) + if baseDir == "" { + baseDir = "." 
+ } + + // Parse and sort services by dependency order + services, err := compose.ParseServices(baseDir) + if err != nil { + return nil, err + } + + // Filter services if specific ones were requested + if len(cfg.Services) > 0 && !cfg.NoDeps { + services = filterServicesWithDeps(services, cfg.Services) + } else if len(cfg.Services) > 0 { + services = filterServices(services, cfg.Services) + } + + result.TotalServices = len(services) + fmt.Fprintf(stdout, "๐Ÿณ Starting %d services from %s\n", len(services), cfg.ComposePath) + fmt.Fprintln(stdout) + + // Track VMs by service name for inter-service communication + vmMap := &sync.Map{} + + // Start services in dependency order + for _, svc := range services { + fmt.Fprintf(stdout, "โ”โ”โ” Starting service: %s โ”โ”โ”\n", svc.Name) + + svcResult, err := e.startService(ctx, cfg, svc, vmMap, stdout, stderr) + if err != nil { + svcResult = ServiceResult{ + Name: svc.Name, + Error: err, + } + fmt.Fprintf(stderr, " โŒ Failed: %v\n", err) + } + + result.Services = append(result.Services, svcResult) + + if svcResult.VMID != "" { + vmMap.Store(svc.Name, svcResult.VMID) + } + + fmt.Fprintln(stdout) + } + + return result, nil +} + +// startService starts a single service +func (e *ComposeExecutor) startService(ctx context.Context, cfg ComposeConfig, svc ParsedService, vmMap *sync.Map, stdout, stderr io.Writer) (ServiceResult, error) { + result := ServiceResult{ + Name: svc.Name, + Ports: svc.Ports, + } + + // Create VM alias from project name and service name + vmAlias := fmt.Sprintf("%s_%s", cfg.ProjectName, svc.Name) + result.VMAlias = vmAlias + + // Check if this service has a Dockerfile or is image-based + var df *Dockerfile + if svc.Dockerfile != "" { + var err error + df, err = ParseDockerfile(svc.Dockerfile) + if err != nil { + return result, fmt.Errorf("failed to parse Dockerfile: %w", err) + } + fmt.Fprintf(stdout, " ๐Ÿ“„ Using Dockerfile: %s\n", svc.Dockerfile) + } else if svc.Image != "" { + fmt.Fprintf(stdout, 
" ๐Ÿ“ฆ Image-based service: %s\n", svc.Image) + fmt.Fprintf(stdout, " โš ๏ธ Note: Using Vers base image (not %s)\n", svc.Image) + // Create a minimal Dockerfile equivalent + df = &Dockerfile{ + BaseImage: svc.Image, + Env: make(map[string]string), + } + } else { + return result, fmt.Errorf("service has no build or image specified") + } + + // Create VM + fmt.Fprintln(stdout, " ๐Ÿš€ Creating Vers VM...") + + vmConfig := vers.NewRootRequestVmConfigParam{ + MemSizeMib: vers.F(svc.MemSizeMib), + VcpuCount: vers.F(svc.VcpuCount), + FsSizeMib: vers.F(int64(4096)), // Default 4GB + } + + body := vers.VmNewRootParams{ + NewRootRequest: vers.NewRootRequestParam{ + VmConfig: vers.F(vmConfig), + }, + } + + resp, err := e.app.Client.Vm.NewRoot(ctx, body) + if err != nil { + return result, fmt.Errorf("failed to create VM: %w", err) + } + + result.VMID = resp.VmID + fmt.Fprintf(stdout, " VM: %s\n", result.VMID) + + // Set alias + if err := utils.SetAlias(vmAlias, result.VMID); err != nil { + fmt.Fprintf(stderr, " Warning: could not set alias: %v\n", err) + } + + // Get connection info + info, err := vmSvc.GetConnectInfo(ctx, e.app.Client, result.VMID) + if err != nil { + return result, fmt.Errorf("failed to get VM connection info: %w", err) + } + + sshClient := sshutil.NewClient(info.Host, info.KeyPath, info.VMDomain) + + // Wait for VM to be ready + fmt.Fprintln(stdout, " โณ Waiting for VM...") + if err := e.waitForSSH(ctx, sshClient); err != nil { + return result, fmt.Errorf("VM not ready: %w", err) + } + + // Setup workdir + workdir := svc.WorkingDir + if workdir == "" && df.WorkDir != "" { + workdir = df.WorkDir + } + if workdir == "" { + workdir = "/app" + } + + mkdirCmd := fmt.Sprintf("mkdir -p %s", workdir) + if err := sshClient.Execute(ctx, mkdirCmd, io.Discard, stderr); err != nil { + return result, fmt.Errorf("failed to create workdir: %w", err) + } + + // Copy build context if available + if svc.BuildContext != "" { + fmt.Fprintf(stdout, " ๐Ÿ“ฆ Copying build 
context...\n") + if err := e.copyBuildContext(ctx, sshClient, svc.BuildContext, workdir, df, stderr); err != nil { + return result, fmt.Errorf("failed to copy build context: %w", err) + } + } + + // Merge environment variables + env := make(map[string]string) + for k, v := range df.Env { + env[k] = v + } + for k, v := range svc.Environment { + env[k] = v + } + // Add service discovery env vars (other services' VMs) + vmMap.Range(func(key, value interface{}) bool { + svcName := key.(string) + vmID := value.(string) + // Services can reach each other via their aliases + env[fmt.Sprintf("%s_HOST", strings.ToUpper(svcName))] = vmAlias + env[fmt.Sprintf("%s_VM", strings.ToUpper(svcName))] = vmID + return true + }) + + // Set environment variables + if len(env) > 0 { + if err := e.setupEnvironment(ctx, sshClient, env, cfg.EnvVars, stderr); err != nil { + return result, fmt.Errorf("failed to set environment: %w", err) + } + } + + // Run setup commands + runCommands := df.GetRunCommands() + if len(runCommands) > 0 { + fmt.Fprintf(stdout, " ๐Ÿ”จ Running %d setup commands...\n", len(runCommands)) + for _, cmd := range runCommands { + fullCmd := fmt.Sprintf("cd %s && %s", workdir, cmd) + if err := sshClient.Execute(ctx, fullCmd, stdout, stderr); err != nil { + return result, fmt.Errorf("setup command failed: %w", err) + } + } + } + result.SetupComplete = true + + // Determine start command + startCmd := svc.Command + if len(startCmd) == 0 { + startCmd = df.GetStartCommand() + } + + // Start the application + if len(startCmd) > 0 { + cmdStr := strings.Join(startCmd, " ") + fullCmd := fmt.Sprintf("cd %s && %s", workdir, cmdStr) + + if cfg.Detach { + fmt.Fprintf(stdout, " โ–ถ๏ธ Starting: %s\n", truncateString(cmdStr, 50)) + bgCmd := fmt.Sprintf("nohup sh -c '%s' > /tmp/app.log 2>&1 &", fullCmd) + if err := sshClient.Execute(ctx, bgCmd, io.Discard, stderr); err != nil { + return result, fmt.Errorf("failed to start application: %w", err) + } + result.Running = true + } else { + 
fmt.Fprintf(stdout, " โ„น๏ธ Ready to start: %s\n", truncateString(cmdStr, 50)) + } + } + + fmt.Fprintf(stdout, " โœ… Service %s ready\n", svc.Name) + return result, nil +} + +// waitForSSH waits for SSH to become available +func (e *ComposeExecutor) waitForSSH(ctx context.Context, client *sshutil.Client) error { + maxAttempts := 60 + for i := 0; i < maxAttempts; i++ { + err := client.Execute(ctx, "echo ready", io.Discard, io.Discard) + if err == nil { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + } + } + return fmt.Errorf("timeout waiting for VM SSH") +} + +// copyBuildContext copies the build context to the VM +func (e *ComposeExecutor) copyBuildContext(ctx context.Context, client *sshutil.Client, buildContext, workdir string, df *Dockerfile, stderr io.Writer) error { + copies := df.GetCopyInstructions() + + if len(copies) == 0 { + // Copy everything + return client.Upload(ctx, buildContext, workdir, true) + } + + for _, copy := range copies { + if len(copy.Args) < 2 { + continue + } + src := copy.Args[0] + dst := copy.Args[len(copy.Args)-1] + + srcPath := filepath.Join(buildContext, src) + dstPath := dst + if !filepath.IsAbs(dst) { + dstPath = filepath.Join(workdir, dst) + } + + info, err := os.Stat(srcPath) + if err != nil { + // Try glob + matches, _ := filepath.Glob(srcPath) + if len(matches) == 0 { + continue + } + for _, m := range matches { + mInfo, _ := os.Stat(m) + if err := client.Upload(ctx, m, filepath.Join(dstPath, filepath.Base(m)), mInfo != nil && mInfo.IsDir()); err != nil { + return err + } + } + continue + } + + if err := client.Upload(ctx, srcPath, dstPath, info.IsDir()); err != nil { + return err + } + } + + return nil +} + +// setupEnvironment sets environment variables +func (e *ComposeExecutor) setupEnvironment(ctx context.Context, client *sshutil.Client, env map[string]string, additional []string, stderr io.Writer) error { + var envLines []string + for key, value := range env { 
+ envLines = append(envLines, fmt.Sprintf("%s=%q", key, value)) + } + for _, envVar := range additional { + envLines = append(envLines, envVar) + } + + if len(envLines) == 0 { + return nil + } + + cmd := fmt.Sprintf("cat >> /etc/environment << 'EOF'\n%s\nEOF", strings.Join(envLines, "\n")) + return client.Execute(ctx, cmd, io.Discard, stderr) +} + +// filterServicesWithDeps filters to requested services plus their dependencies +func filterServicesWithDeps(services []ParsedService, requested []string) []ParsedService { + requestedSet := make(map[string]bool) + for _, name := range requested { + requestedSet[name] = true + } + + // Add all dependencies recursively + svcMap := make(map[string]ParsedService) + for _, svc := range services { + svcMap[svc.Name] = svc + } + + var addDeps func(name string) + addDeps = func(name string) { + if svc, ok := svcMap[name]; ok { + requestedSet[name] = true + for _, dep := range svc.DependsOn { + addDeps(dep) + } + } + } + + for _, name := range requested { + addDeps(name) + } + + // Filter + var result []ParsedService + for _, svc := range services { + if requestedSet[svc.Name] { + result = append(result, svc) + } + } + return result +} + +// filterServices filters to only the requested services +func filterServices(services []ParsedService, requested []string) []ParsedService { + requestedSet := make(map[string]bool) + for _, name := range requested { + requestedSet[name] = true + } + + var result []ParsedService + for _, svc := range services { + if requestedSet[svc.Name] { + result = append(result, svc) + } + } + return result +} diff --git a/internal/docker/compose_test.go b/internal/docker/compose_test.go new file mode 100644 index 0000000..2f79fc5 --- /dev/null +++ b/internal/docker/compose_test.go @@ -0,0 +1,249 @@ +package docker + +import ( + "os" + "path/filepath" + "testing" +) + +func TestParseComposeFile(t *testing.T) { + // Create a temporary compose file + tmpDir := t.TempDir() + composePath := 
filepath.Join(tmpDir, "docker-compose.yml") + + composeContent := ` +version: "3.8" +services: + web: + build: + context: ./web + dockerfile: Dockerfile.prod + ports: + - "3000:3000" + environment: + - NODE_ENV=production + - API_URL=http://api:8080 + depends_on: + - api + - db + api: + build: ./api + ports: + - "8080:8080" + environment: + DEBUG: "true" + depends_on: + - db + db: + image: postgres:15 + environment: + POSTGRES_PASSWORD: secret + POSTGRES_DB: myapp +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write compose file: %v", err) + } + + // Parse the file + compose, err := ParseComposeFile(composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + // Verify version + if compose.Version != "3.8" { + t.Errorf("Expected version 3.8, got %s", compose.Version) + } + + // Verify services + if len(compose.Services) != 3 { + t.Errorf("Expected 3 services, got %d", len(compose.Services)) + } + + // Check web service + web, ok := compose.Services["web"] + if !ok { + t.Fatal("web service not found") + } + if web.Build == nil { + t.Fatal("web service should have build config") + } + if web.Build.Context != "./web" { + t.Errorf("Expected build context ./web, got %s", web.Build.Context) + } + if web.Build.Dockerfile != "Dockerfile.prod" { + t.Errorf("Expected Dockerfile.prod, got %s", web.Build.Dockerfile) + } + if len(web.Ports) != 1 || web.Ports[0] != "3000:3000" { + t.Errorf("Unexpected ports: %v", web.Ports) + } + + // Check api service (short build format) + api, ok := compose.Services["api"] + if !ok { + t.Fatal("api service not found") + } + if api.Build == nil { + t.Fatal("api service should have build config") + } + if api.Build.Context != "./api" { + t.Errorf("Expected build context ./api, got %s", api.Build.Context) + } + if api.Build.Dockerfile != "Dockerfile" { + t.Errorf("Expected default Dockerfile, got %s", api.Build.Dockerfile) + } + + // Check db service 
(image-based) + db, ok := compose.Services["db"] + if !ok { + t.Fatal("db service not found") + } + if db.Image != "postgres:15" { + t.Errorf("Expected postgres:15, got %s", db.Image) + } +} + +func TestParseServices(t *testing.T) { + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "docker-compose.yml") + + composeContent := ` +version: "3" +services: + frontend: + build: ./frontend + depends_on: + - backend + backend: + build: ./backend + depends_on: + - database + database: + image: mysql:8 +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write compose file: %v", err) + } + + compose, err := ParseComposeFile(composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + services, err := compose.ParseServices(tmpDir) + if err != nil { + t.Fatalf("Failed to parse services: %v", err) + } + + // Verify topological order: database -> backend -> frontend + if len(services) != 3 { + t.Fatalf("Expected 3 services, got %d", len(services)) + } + + if services[0].Name != "database" { + t.Errorf("Expected database first, got %s", services[0].Name) + } + if services[1].Name != "backend" { + t.Errorf("Expected backend second, got %s", services[1].Name) + } + if services[2].Name != "frontend" { + t.Errorf("Expected frontend third, got %s", services[2].Name) + } +} + +func TestCircularDependency(t *testing.T) { + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "docker-compose.yml") + + composeContent := ` +version: "3" +services: + a: + image: alpine + depends_on: + - b + b: + image: alpine + depends_on: + - c + c: + image: alpine + depends_on: + - a +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write compose file: %v", err) + } + + compose, err := ParseComposeFile(composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + _, err = compose.ParseServices(tmpDir) + if err == nil 
{ + t.Fatal("Expected circular dependency error") + } +} + +func TestParseEnvironment(t *testing.T) { + tests := []struct { + name string + input interface{} + expected map[string]string + }{ + { + name: "list format", + input: []interface{}{"FOO=bar", "BAZ=qux"}, + expected: map[string]string{ + "FOO": "bar", + "BAZ": "qux", + }, + }, + { + name: "map format", + input: map[string]interface{}{ + "FOO": "bar", + "BAZ": "qux", + }, + expected: map[string]string{ + "FOO": "bar", + "BAZ": "qux", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := parseEnvironment(tc.input) + for k, v := range tc.expected { + if result[k] != v { + t.Errorf("Expected %s=%s, got %s=%s", k, v, k, result[k]) + } + } + }) + } +} + +func TestParseMemoryLimit(t *testing.T) { + tests := []struct { + input string + expected int64 + }{ + {"512m", 512}, + {"1g", 1024}, + {"2G", 2048}, + {"256M", 256}, + {"100", 256}, // Minimum + {"", 1024}, // Default + } + + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + result := parseMemoryLimit(tc.input) + if result != tc.expected { + t.Errorf("parseMemoryLimit(%q) = %d, expected %d", tc.input, result, tc.expected) + } + }) + } +} diff --git a/internal/docker/executor.go b/internal/docker/executor.go new file mode 100644 index 0000000..83c5989 --- /dev/null +++ b/internal/docker/executor.go @@ -0,0 +1,336 @@ +package docker + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hdresearch/vers-cli/internal/app" + vmSvc "github.com/hdresearch/vers-cli/internal/services/vm" + sshutil "github.com/hdresearch/vers-cli/internal/ssh" + "github.com/hdresearch/vers-cli/internal/utils" + vers "github.com/hdresearch/vers-sdk-go" +) + +// RunConfig holds configuration for running a Dockerfile on a Vers VM +type RunConfig struct { + DockerfilePath string + BuildContext string // Directory to use as build context + MemSizeMib int64 + VcpuCount int64 + FsSizeMib 
int64 + VMAlias string + Detach bool // Run in detached mode + PortMappings []string // Port mappings (host:container format) + EnvVars []string // Additional environment variables + Interactive bool // Run interactively +} + +// RunResult contains the result of running a Dockerfile +type RunResult struct { + VMID string + VMAlias string + Dockerfile *Dockerfile + ExposedPorts []string + StartCommand []string + SetupComplete bool + Running bool +} + +// Executor handles running Dockerfiles on Vers VMs +type Executor struct { + app *app.App +} + +// NewExecutor creates a new Dockerfile executor +func NewExecutor(app *app.App) *Executor { + return &Executor{app: app} +} + +// Run executes a Dockerfile by: +// 1. Creating a new Vers VM +// 2. Copying the build context +// 3. Running setup commands (RUN instructions) +// 4. Starting the application (CMD/ENTRYPOINT) +func (e *Executor) Run(ctx context.Context, cfg RunConfig, stdout, stderr io.Writer) (*RunResult, error) { + result := &RunResult{} + + // Parse the Dockerfile + df, err := ParseDockerfile(cfg.DockerfilePath) + if err != nil { + return nil, fmt.Errorf("failed to parse Dockerfile: %w", err) + } + result.Dockerfile = df + result.ExposedPorts = df.ExposePorts + result.StartCommand = df.GetStartCommand() + + fmt.Fprintln(stdout, "๐Ÿ“„ Parsed Dockerfile") + fmt.Fprintf(stdout, " Base image: %s\n", df.BaseImage) + if df.WorkDir != "" { + fmt.Fprintf(stdout, " Workdir: %s\n", df.WorkDir) + } + if len(df.ExposePorts) > 0 { + fmt.Fprintf(stdout, " Exposed ports: %s\n", strings.Join(df.ExposePorts, ", ")) + } + + // Step 1: Create a new VM + fmt.Fprintln(stdout, "\n๐Ÿš€ Creating Vers VM...") + + vmConfig := vers.NewRootRequestVmConfigParam{ + MemSizeMib: vers.F(cfg.MemSizeMib), + VcpuCount: vers.F(cfg.VcpuCount), + FsSizeMib: vers.F(cfg.FsSizeMib), + } + + body := vers.VmNewRootParams{ + NewRootRequest: vers.NewRootRequestParam{ + VmConfig: vers.F(vmConfig), + }, + } + + resp, err := e.app.Client.Vm.NewRoot(ctx, 
body) + if err != nil { + return nil, fmt.Errorf("failed to create VM: %w", err) + } + + result.VMID = resp.VmID + fmt.Fprintf(stdout, " VM created: %s\n", result.VMID) + + // Set HEAD and alias + if err := utils.SetHead(result.VMID); err != nil { + fmt.Fprintf(stderr, "Warning: could not set HEAD: %v\n", err) + } + + if cfg.VMAlias != "" { + if err := utils.SetAlias(cfg.VMAlias, result.VMID); err != nil { + fmt.Fprintf(stderr, "Warning: could not set alias: %v\n", err) + } + result.VMAlias = cfg.VMAlias + } + + // Step 2: Get connection info and create SSH client + info, err := vmSvc.GetConnectInfo(ctx, e.app.Client, result.VMID) + if err != nil { + return nil, fmt.Errorf("failed to get VM connection info: %w", err) + } + + sshClient := sshutil.NewClient(info.Host, info.KeyPath, info.VMDomain) + + // Wait for VM to be ready + fmt.Fprintln(stdout, "\nโณ Waiting for VM to be ready...") + if err := e.waitForSSH(ctx, sshClient, stdout); err != nil { + return nil, fmt.Errorf("VM not ready: %w", err) + } + fmt.Fprintln(stdout, " VM is ready!") + + // Step 3: Setup workdir + workdir := df.WorkDir + if workdir == "" { + workdir = "/app" + } + fmt.Fprintf(stdout, "\n๐Ÿ“ Setting up workdir: %s\n", workdir) + + mkdirCmd := fmt.Sprintf("mkdir -p %s", workdir) + if err := sshClient.Execute(ctx, mkdirCmd, stdout, stderr); err != nil { + return nil, fmt.Errorf("failed to create workdir: %w", err) + } + + // Step 4: Copy build context + if cfg.BuildContext != "" { + fmt.Fprintf(stdout, "\n๐Ÿ“ฆ Copying build context from: %s\n", cfg.BuildContext) + if err := e.copyBuildContext(ctx, sshClient, cfg.BuildContext, workdir, df, stdout, stderr); err != nil { + return nil, fmt.Errorf("failed to copy build context: %w", err) + } + } + + // Step 5: Set environment variables + if len(df.Env) > 0 || len(cfg.EnvVars) > 0 { + fmt.Fprintln(stdout, "\n๐Ÿ”ง Setting environment variables...") + if err := e.setupEnvironment(ctx, sshClient, df.Env, cfg.EnvVars, stderr); err != nil { + return nil, 
fmt.Errorf("failed to set environment: %w", err) + } + } + + // Step 6: Run setup commands (RUN instructions) + runCommands := df.GetRunCommands() + if len(runCommands) > 0 { + fmt.Fprintf(stdout, "\n๐Ÿ”จ Running %d setup commands...\n", len(runCommands)) + for i, cmd := range runCommands { + fmt.Fprintf(stdout, " [%d/%d] %s\n", i+1, len(runCommands), truncateString(cmd, 60)) + + // Execute in workdir + fullCmd := fmt.Sprintf("cd %s && %s", workdir, cmd) + if err := sshClient.Execute(ctx, fullCmd, stdout, stderr); err != nil { + return nil, fmt.Errorf("setup command failed: %s: %w", truncateString(cmd, 40), err) + } + } + } + result.SetupComplete = true + + // Step 7: Start the application + startCmd := result.StartCommand + if len(startCmd) > 0 { + cmdStr := strings.Join(startCmd, " ") + fullCmd := fmt.Sprintf("cd %s && %s", workdir, cmdStr) + + if cfg.Detach { + fmt.Fprintf(stdout, "\nโ–ถ๏ธ Starting application (detached): %s\n", cmdStr) + // Run in background with nohup + bgCmd := fmt.Sprintf("nohup sh -c '%s' > /tmp/app.log 2>&1 &", fullCmd) + if err := sshClient.Execute(ctx, bgCmd, stdout, stderr); err != nil { + return nil, fmt.Errorf("failed to start application: %w", err) + } + result.Running = true + fmt.Fprintln(stdout, " Application started in background") + fmt.Fprintln(stdout, " Logs: /tmp/app.log") + } else if cfg.Interactive { + fmt.Fprintf(stdout, "\nโ–ถ๏ธ Starting application (interactive): %s\n", cmdStr) + // Run interactively + if err := sshClient.InteractiveCommand(ctx, fullCmd, e.app.IO.In, stdout, stderr); err != nil { + // Don't return error for normal exit + fmt.Fprintf(stderr, "Application exited: %v\n", err) + } + } else { + fmt.Fprintf(stdout, "\nโ–ถ๏ธ Starting application: %s\n", cmdStr) + if err := sshClient.Execute(ctx, fullCmd, stdout, stderr); err != nil { + return nil, fmt.Errorf("application failed: %w", err) + } + } + } else { + fmt.Fprintln(stdout, "\nโœ… Setup complete (no CMD/ENTRYPOINT specified)") + fmt.Fprintf(stdout, " 
Connect with: vers connect %s\n", result.VMID) + } + + return result, nil +} + +// waitForSSH waits for SSH to become available on the VM +func (e *Executor) waitForSSH(ctx context.Context, client *sshutil.Client, stdout io.Writer) error { + maxAttempts := 60 + for i := 0; i < maxAttempts; i++ { + err := client.Execute(ctx, "echo ready", io.Discard, io.Discard) + if err == nil { + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + // Wait before retrying + fmt.Fprint(stdout, ".") + } + } + fmt.Fprintln(stdout) + return fmt.Errorf("timeout waiting for VM SSH") +} + +// copyBuildContext copies files based on COPY instructions +func (e *Executor) copyBuildContext(ctx context.Context, client *sshutil.Client, buildContext, workdir string, df *Dockerfile, stdout, stderr io.Writer) error { + copies := df.GetCopyInstructions() + + if len(copies) == 0 { + // No COPY instructions, copy everything + fmt.Fprintln(stdout, " Copying entire build context...") + return e.copyDirectory(ctx, client, buildContext, workdir, stdout, stderr) + } + + for _, copy := range copies { + if len(copy.Args) < 2 { + continue + } + + src := copy.Args[0] + dst := copy.Args[len(copy.Args)-1] + + // Resolve source path relative to build context + srcPath := filepath.Join(buildContext, src) + + // Resolve destination path relative to workdir + dstPath := dst + if !filepath.IsAbs(dst) { + dstPath = filepath.Join(workdir, dst) + } + + fmt.Fprintf(stdout, " COPY %s -> %s\n", src, dstPath) + + // Use SFTP to copy + if err := e.copyPath(ctx, client, srcPath, dstPath, stdout, stderr); err != nil { + return fmt.Errorf("failed to copy %s: %w", src, err) + } + } + + return nil +} + +// copyDirectory copies an entire directory to the VM +func (e *Executor) copyDirectory(ctx context.Context, client *sshutil.Client, src, dst string, stdout, stderr io.Writer) error { + return client.Upload(ctx, src, dst, true) +} + +// copyPath copies a file or directory 
+func (e *Executor) copyPath(ctx context.Context, client *sshutil.Client, src, dst string, stdout, stderr io.Writer) error { + // Check if source is a directory or file + matches, err := filepath.Glob(src) + if err != nil || len(matches) == 0 { + // Try as exact path - check if it exists + info, statErr := os.Stat(src) + if statErr != nil { + return fmt.Errorf("source not found: %s", src) + } + return client.Upload(ctx, src, dst, info.IsDir()) + } + + // Handle glob patterns + for _, path := range matches { + info, err := os.Stat(path) + if err != nil { + continue + } + + dstPath := dst + if len(matches) > 1 { + // Multiple files, dst should be a directory + dstPath = filepath.Join(dst, filepath.Base(path)) + } + + if err := client.Upload(ctx, path, dstPath, info.IsDir()); err != nil { + return err + } + } + + return nil +} + +// setupEnvironment sets environment variables in /etc/environment +func (e *Executor) setupEnvironment(ctx context.Context, client *sshutil.Client, env map[string]string, additional []string, stderr io.Writer) error { + var envLines []string + + for key, value := range env { + envLines = append(envLines, fmt.Sprintf("%s=%q", key, value)) + } + + for _, envVar := range additional { + envLines = append(envLines, envVar) + } + + if len(envLines) == 0 { + return nil + } + + // Append to /etc/environment + cmd := fmt.Sprintf("cat >> /etc/environment << 'EOF'\n%s\nEOF", strings.Join(envLines, "\n")) + return client.Execute(ctx, cmd, io.Discard, stderr) +} + +// truncateString truncates a string with ellipsis +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen-3] + "..." 
+} diff --git a/internal/docker/parser.go b/internal/docker/parser.go new file mode 100644 index 0000000..a7e49ee --- /dev/null +++ b/internal/docker/parser.go @@ -0,0 +1,254 @@ +package docker + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strings" +) + +// Instruction represents a single Dockerfile instruction +type Instruction struct { + Command string // FROM, RUN, COPY, WORKDIR, ENV, EXPOSE, CMD, ENTRYPOINT, etc. + Args []string // Arguments to the instruction + Raw string // Original line (for debugging) +} + +// Dockerfile represents a parsed Dockerfile +type Dockerfile struct { + Instructions []Instruction + BaseImage string // FROM image + WorkDir string // Current WORKDIR + Env map[string]string // Environment variables + ExposePorts []string // EXPOSE ports + Cmd []string // CMD instruction + Entrypoint []string // ENTRYPOINT instruction +} + +// ParseDockerfile reads and parses a Dockerfile from the given path +func ParseDockerfile(path string) (*Dockerfile, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open Dockerfile: %w", err) + } + defer file.Close() + + df := &Dockerfile{ + Instructions: []Instruction{}, + Env: make(map[string]string), + ExposePorts: []string{}, + } + + scanner := bufio.NewScanner(file) + var currentLine string + + for scanner.Scan() { + line := scanner.Text() + + // Handle line continuations + if strings.HasSuffix(line, "\\") { + currentLine += strings.TrimSuffix(line, "\\") + " " + continue + } + currentLine += line + + // Skip empty lines and comments + trimmed := strings.TrimSpace(currentLine) + if trimmed == "" || strings.HasPrefix(trimmed, "#") { + currentLine = "" + continue + } + + instr, err := parseInstruction(trimmed) + if err != nil { + currentLine = "" + continue // Skip invalid instructions + } + + df.Instructions = append(df.Instructions, instr) + + // Extract key information + switch instr.Command { + case "FROM": + if len(instr.Args) > 0 { + df.BaseImage = 
instr.Args[0] + } + case "WORKDIR": + if len(instr.Args) > 0 { + df.WorkDir = instr.Args[0] + } + case "ENV": + if len(instr.Args) >= 2 { + df.Env[instr.Args[0]] = strings.Join(instr.Args[1:], " ") + } else if len(instr.Args) == 1 && strings.Contains(instr.Args[0], "=") { + parts := strings.SplitN(instr.Args[0], "=", 2) + df.Env[parts[0]] = parts[1] + } + case "EXPOSE": + df.ExposePorts = append(df.ExposePorts, instr.Args...) + case "CMD": + df.Cmd = instr.Args + case "ENTRYPOINT": + df.Entrypoint = instr.Args + } + + currentLine = "" + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading Dockerfile: %w", err) + } + + if df.BaseImage == "" { + return nil, fmt.Errorf("Dockerfile must have a FROM instruction") + } + + return df, nil +} + +// parseInstruction parses a single Dockerfile instruction line +func parseInstruction(line string) (Instruction, error) { + // Match instruction pattern: INSTRUCTION args... + parts := strings.SplitN(line, " ", 2) + if len(parts) == 0 { + return Instruction{}, fmt.Errorf("empty instruction") + } + + cmd := strings.ToUpper(parts[0]) + var args []string + + if len(parts) > 1 { + argStr := strings.TrimSpace(parts[1]) + + // Handle JSON array format: ["arg1", "arg2"] + if strings.HasPrefix(argStr, "[") && strings.HasSuffix(argStr, "]") { + args = parseJSONArray(argStr) + } else { + // Handle shell format + args = parseShellArgs(argStr) + } + } + + return Instruction{ + Command: cmd, + Args: args, + Raw: line, + }, nil +} + +// parseJSONArray parses JSON array format arguments +func parseJSONArray(s string) []string { + // Remove brackets + s = strings.TrimPrefix(s, "[") + s = strings.TrimSuffix(s, "]") + + // Match quoted strings + re := regexp.MustCompile(`"([^"]*)"`) + matches := re.FindAllStringSubmatch(s, -1) + + var args []string + for _, match := range matches { + if len(match) > 1 { + args = append(args, match[1]) + } + } + return args +} + +// parseShellArgs parses shell-style arguments +func 
parseShellArgs(s string) []string { + // Simple split by spaces, but respect quotes + var args []string + var current strings.Builder + inQuote := false + quoteChar := rune(0) + + for _, ch := range s { + switch { + case (ch == '"' || ch == '\'') && !inQuote: + inQuote = true + quoteChar = ch + case ch == quoteChar && inQuote: + inQuote = false + quoteChar = 0 + case ch == ' ' && !inQuote: + if current.Len() > 0 { + args = append(args, current.String()) + current.Reset() + } + default: + current.WriteRune(ch) + } + } + if current.Len() > 0 { + args = append(args, current.String()) + } + + return args +} + +// GetRunCommands returns all RUN instruction commands +func (df *Dockerfile) GetRunCommands() []string { + var commands []string + for _, instr := range df.Instructions { + if instr.Command == "RUN" { + commands = append(commands, strings.Join(instr.Args, " ")) + } + } + return commands +} + +// GetCopyInstructions returns all COPY/ADD instructions +func (df *Dockerfile) GetCopyInstructions() []Instruction { + var copies []Instruction + for _, instr := range df.Instructions { + if instr.Command == "COPY" || instr.Command == "ADD" { + copies = append(copies, instr) + } + } + return copies +} + +// GetStartCommand returns the command to run (CMD or ENTRYPOINT) +func (df *Dockerfile) GetStartCommand() []string { + if len(df.Entrypoint) > 0 && len(df.Cmd) > 0 { + // Combine ENTRYPOINT with CMD as arguments + return append(df.Entrypoint, df.Cmd...) 
+ } + if len(df.Entrypoint) > 0 { + return df.Entrypoint + } + return df.Cmd +} + +// ToSetupScript converts Dockerfile instructions to a bash setup script +func (df *Dockerfile) ToSetupScript() string { + var script strings.Builder + + script.WriteString("#!/bin/bash\nset -e\n\n") + + // Set environment variables + for key, value := range df.Env { + script.WriteString(fmt.Sprintf("export %s=%q\n", key, value)) + } + if len(df.Env) > 0 { + script.WriteString("\n") + } + + // Create and change to workdir + if df.WorkDir != "" { + script.WriteString(fmt.Sprintf("mkdir -p %s\n", df.WorkDir)) + script.WriteString(fmt.Sprintf("cd %s\n\n", df.WorkDir)) + } + + // Run commands + for _, instr := range df.Instructions { + if instr.Command == "RUN" { + script.WriteString(strings.Join(instr.Args, " ")) + script.WriteString("\n") + } + } + + return script.String() +} diff --git a/internal/docker/parser_test.go b/internal/docker/parser_test.go new file mode 100644 index 0000000..be551fe --- /dev/null +++ b/internal/docker/parser_test.go @@ -0,0 +1,296 @@ +package docker + +import ( + "os" + "path/filepath" + "testing" +) + +func TestParseInstruction(t *testing.T) { + tests := []struct { + name string + line string + wantCmd string + wantArgs []string + }{ + { + name: "FROM instruction", + line: "FROM node:18-alpine", + wantCmd: "FROM", + wantArgs: []string{"node:18-alpine"}, + }, + { + name: "WORKDIR instruction", + line: "WORKDIR /app", + wantCmd: "WORKDIR", + wantArgs: []string{"/app"}, + }, + { + name: "RUN instruction with shell command", + line: "RUN npm install", + wantCmd: "RUN", + wantArgs: []string{"npm", "install"}, + }, + { + name: "RUN instruction with complex command", + line: "RUN apt-get update && apt-get install -y curl", + wantCmd: "RUN", + wantArgs: []string{"apt-get", "update", "&&", "apt-get", "install", "-y", "curl"}, + }, + { + name: "COPY instruction", + line: "COPY package*.json ./", + wantCmd: "COPY", + wantArgs: []string{"package*.json", "./"}, + }, + 
		// Remaining table entries: plain-arg, KEY=VALUE vs space-separated ENV,
		// and JSON-array vs shell-form CMD/ENTRYPOINT shapes.
		{
			name:     "EXPOSE instruction",
			line:     "EXPOSE 3000",
			wantCmd:  "EXPOSE",
			wantArgs: []string{"3000"},
		},
		{
			name:     "ENV instruction with equals",
			line:     "ENV NODE_ENV=production",
			wantCmd:  "ENV",
			wantArgs: []string{"NODE_ENV=production"},
		},
		{
			name:     "ENV instruction with space",
			line:     "ENV NODE_ENV production",
			wantCmd:  "ENV",
			wantArgs: []string{"NODE_ENV", "production"},
		},
		{
			name:     "CMD instruction JSON format",
			line:     `CMD ["npm", "start"]`,
			wantCmd:  "CMD",
			wantArgs: []string{"npm", "start"},
		},
		{
			name:     "CMD instruction shell format",
			line:     "CMD npm start",
			wantCmd:  "CMD",
			wantArgs: []string{"npm", "start"},
		},
		{
			name:     "ENTRYPOINT instruction JSON format",
			line:     `ENTRYPOINT ["node", "server.js"]`,
			wantCmd:  "ENTRYPOINT",
			wantArgs: []string{"node", "server.js"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			instr, err := parseInstruction(tt.line)
			if err != nil {
				t.Fatalf("parseInstruction() error = %v", err)
			}
			if instr.Command != tt.wantCmd {
				t.Errorf("parseInstruction() command = %v, want %v", instr.Command, tt.wantCmd)
			}
			// Length mismatch makes element comparison meaningless; bail early.
			if len(instr.Args) != len(tt.wantArgs) {
				t.Errorf("parseInstruction() args = %v, want %v", instr.Args, tt.wantArgs)
				return
			}
			for i, arg := range instr.Args {
				if arg != tt.wantArgs[i] {
					t.Errorf("parseInstruction() args[%d] = %v, want %v", i, arg, tt.wantArgs[i])
				}
			}
		})
	}
}

// TestParseDockerfile parses a representative single-stage Dockerfile from a
// temp dir and checks each extracted field (base image, workdir, env, ports,
// CMD, RUN list, COPY list).
func TestParseDockerfile(t *testing.T) {
	// Create a temporary Dockerfile
	content := `# Test Dockerfile
FROM node:18-alpine

WORKDIR /app

ENV NODE_ENV=production

COPY package*.json ./
RUN npm install

COPY . .

EXPOSE 3000
EXPOSE 8080

CMD ["npm", "start"]
`
	tmpDir := t.TempDir()
	dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
	if err := os.WriteFile(dockerfilePath, []byte(content), 0644); err != nil {
		t.Fatalf("Failed to write temp Dockerfile: %v", err)
	}

	df, err := ParseDockerfile(dockerfilePath)
	if err != nil {
		t.Fatalf("ParseDockerfile() error = %v", err)
	}

	// Check base image
	if df.BaseImage != "node:18-alpine" {
		t.Errorf("BaseImage = %v, want node:18-alpine", df.BaseImage)
	}

	// Check workdir
	if df.WorkDir != "/app" {
		t.Errorf("WorkDir = %v, want /app", df.WorkDir)
	}

	// Check environment variables
	if df.Env["NODE_ENV"] != "production" {
		t.Errorf("Env[NODE_ENV] = %v, want production", df.Env["NODE_ENV"])
	}

	// Check exposed ports
	if len(df.ExposePorts) != 2 {
		t.Errorf("len(ExposePorts) = %v, want 2", len(df.ExposePorts))
	}

	// Check CMD
	if len(df.Cmd) != 2 || df.Cmd[0] != "npm" || df.Cmd[1] != "start" {
		t.Errorf("Cmd = %v, want [npm start]", df.Cmd)
	}

	// Check run commands
	runCmds := df.GetRunCommands()
	if len(runCmds) != 1 {
		t.Errorf("len(GetRunCommands()) = %v, want 1", len(runCmds))
	}

	// Check copy instructions
	copies := df.GetCopyInstructions()
	if len(copies) != 2 {
		t.Errorf("len(GetCopyInstructions()) = %v, want 2", len(copies))
	}
}

// TestParseDockerfile_MultiStage verifies that for multi-stage builds the
// parser reports the FINAL stage's FROM and WORKDIR (the runtime image).
func TestParseDockerfile_MultiStage(t *testing.T) {
	content := `FROM node:18 AS builder
WORKDIR /build
COPY . .
RUN npm run build

FROM node:18-alpine
WORKDIR /app
COPY --from=builder /build/dist ./dist
CMD ["node", "dist/index.js"]
`
	tmpDir := t.TempDir()
	dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
	if err := os.WriteFile(dockerfilePath, []byte(content), 0644); err != nil {
		t.Fatalf("Failed to write temp Dockerfile: %v", err)
	}

	df, err := ParseDockerfile(dockerfilePath)
	if err != nil {
		t.Fatalf("ParseDockerfile() error = %v", err)
	}

	// For multi-stage builds, we use the last FROM (the final stage)
	// since that's what represents the runtime image
	if df.BaseImage != "node:18-alpine" {
		t.Errorf("BaseImage = %v, want node:18-alpine", df.BaseImage)
	}

	// Final workdir should be /app from the final stage
	if df.WorkDir != "/app" {
		t.Errorf("WorkDir = %v, want /app", df.WorkDir)
	}
}

// TestParseDockerfile_NoFROM asserts a Dockerfile without FROM is rejected.
func TestParseDockerfile_NoFROM(t *testing.T) {
	content := `WORKDIR /app
RUN echo hello
`
	tmpDir := t.TempDir()
	dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
	if err := os.WriteFile(dockerfilePath, []byte(content), 0644); err != nil {
		t.Fatalf("Failed to write temp Dockerfile: %v", err)
	}

	_, err := ParseDockerfile(dockerfilePath)
	if err == nil {
		t.Error("ParseDockerfile() expected error for missing FROM, got nil")
	}
}

// TestParseDockerfile_LineContinuation checks that backslash-continued RUN
// lines are joined into a single command.
func TestParseDockerfile_LineContinuation(t *testing.T) {
	content := `FROM ubuntu:22.04
RUN apt-get update && \
    apt-get install -y curl && \
    rm -rf /var/lib/apt/lists/*
`
	tmpDir := t.TempDir()
	dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
	if err := os.WriteFile(dockerfilePath, []byte(content), 0644); err != nil {
		t.Fatalf("Failed to write temp Dockerfile: %v", err)
	}

	df, err := ParseDockerfile(dockerfilePath)
	if err != nil {
		t.Fatalf("ParseDockerfile() error = %v", err)
	}

	runCmds := df.GetRunCommands()
	if len(runCmds) != 1 {
		t.Errorf("len(GetRunCommands()) = %v, want 1 (continuation should be joined)", len(runCmds))
	}
}
*testing.T) { + content := `FROM node:18 +WORKDIR /app +ENV NODE_ENV=production +RUN npm install +RUN npm run build +` + tmpDir := t.TempDir() + dockerfilePath := filepath.Join(tmpDir, "Dockerfile") + if err := os.WriteFile(dockerfilePath, []byte(content), 0644); err != nil { + t.Fatalf("Failed to write temp Dockerfile: %v", err) + } + + df, err := ParseDockerfile(dockerfilePath) + if err != nil { + t.Fatalf("ParseDockerfile() error = %v", err) + } + + script := df.ToSetupScript() + + // Check script contains expected content + if !contains(script, "#!/bin/bash") { + t.Error("Script should start with shebang") + } + if !contains(script, "set -e") { + t.Error("Script should have set -e") + } + if !contains(script, "NODE_ENV") { + t.Error("Script should set NODE_ENV") + } + if !contains(script, "mkdir -p /app") { + t.Error("Script should create workdir") + } + if !contains(script, "npm install") { + t.Error("Script should contain npm install") + } +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsHelper(s, substr)) +} + +func containsHelper(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/handlers/docker_build.go b/internal/handlers/docker_build.go new file mode 100644 index 0000000..b0d9ad1 --- /dev/null +++ b/internal/handlers/docker_build.go @@ -0,0 +1,297 @@ +package handlers + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hdresearch/vers-cli/internal/app" + "github.com/hdresearch/vers-cli/internal/docker" + "github.com/hdresearch/vers-cli/internal/presenters" + vmSvc "github.com/hdresearch/vers-cli/internal/services/vm" + sshutil "github.com/hdresearch/vers-cli/internal/ssh" + "github.com/hdresearch/vers-cli/internal/utils" + vers "github.com/hdresearch/vers-sdk-go" +) + +// DockerBuildReq contains the request parameters for 
docker build +type DockerBuildReq struct { + DockerfilePath string + BuildContext string + Tag string // Tag/name for the commit + MemSizeMib int64 + VcpuCount int64 + FsSizeMib int64 + NoCache bool + BuildArgs []string // Build arguments +} + +// HandleDockerBuild handles the vers docker build command +// It builds a Dockerfile and creates a Vers commit (snapshot) +func HandleDockerBuild(ctx context.Context, a *app.App, req DockerBuildReq) (presenters.DockerBuildView, error) { + view := presenters.DockerBuildView{} + + // Validate and normalize paths + dockerfilePath := req.DockerfilePath + if dockerfilePath == "" { + dockerfilePath = "Dockerfile" + } + + if !filepath.IsAbs(dockerfilePath) { + cwd, err := os.Getwd() + if err != nil { + return view, fmt.Errorf("failed to get current directory: %w", err) + } + dockerfilePath = filepath.Join(cwd, dockerfilePath) + } + + // Check if Dockerfile exists + if _, err := os.Stat(dockerfilePath); os.IsNotExist(err) { + return view, fmt.Errorf("Dockerfile not found: %s", dockerfilePath) + } + + // Set default build context + buildContext := req.BuildContext + if buildContext == "" { + buildContext = filepath.Dir(dockerfilePath) + } + if !filepath.IsAbs(buildContext) { + cwd, err := os.Getwd() + if err != nil { + return view, fmt.Errorf("failed to get current directory: %w", err) + } + buildContext = filepath.Join(cwd, buildContext) + } + + // Set defaults + memSize := req.MemSizeMib + if memSize == 0 { + memSize = 1024 + } + vcpuCount := req.VcpuCount + if vcpuCount == 0 { + vcpuCount = 2 + } + fsSize := req.FsSizeMib + if fsSize == 0 { + fsSize = 4096 + } + + // Parse Dockerfile + df, err := docker.ParseDockerfile(dockerfilePath) + if err != nil { + return view, fmt.Errorf("failed to parse Dockerfile: %w", err) + } + + view.BaseImage = df.BaseImage + view.NumLayers = len(df.GetRunCommands()) + + fmt.Fprintln(a.IO.Out, "๐Ÿ“„ Parsed Dockerfile") + fmt.Fprintf(a.IO.Out, " Base image: %s\n", df.BaseImage) + fmt.Fprintf(a.IO.Out, 
" Layers: %d RUN commands\n", view.NumLayers) + + // Step 1: Create VM + fmt.Fprintln(a.IO.Out, "\n๐Ÿš€ Creating build VM...") + + vmConfig := vers.NewRootRequestVmConfigParam{ + MemSizeMib: vers.F(memSize), + VcpuCount: vers.F(vcpuCount), + FsSizeMib: vers.F(fsSize), + } + + body := vers.VmNewRootParams{ + NewRootRequest: vers.NewRootRequestParam{ + VmConfig: vers.F(vmConfig), + }, + } + + resp, err := a.Client.Vm.NewRoot(ctx, body) + if err != nil { + return view, fmt.Errorf("failed to create VM: %w", err) + } + + view.VMID = resp.VmID + fmt.Fprintf(a.IO.Out, " VM: %s\n", view.VMID) + + // Step 2: Get SSH connection + info, err := vmSvc.GetConnectInfo(ctx, a.Client, view.VMID) + if err != nil { + return view, fmt.Errorf("failed to get VM connection info: %w", err) + } + + sshClient := sshutil.NewClient(info.Host, info.KeyPath, info.VMDomain) + + // Wait for VM + fmt.Fprintln(a.IO.Out, "\nโณ Waiting for VM...") + if err := waitForSSH(ctx, sshClient, a.IO.Out); err != nil { + return view, fmt.Errorf("VM not ready: %w", err) + } + + // Step 3: Setup workdir + workdir := df.WorkDir + if workdir == "" { + workdir = "/app" + } + + fmt.Fprintf(a.IO.Out, "\n๐Ÿ“ Setting up workdir: %s\n", workdir) + if err := sshClient.Execute(ctx, fmt.Sprintf("mkdir -p %s", workdir), a.IO.Out, a.IO.Err); err != nil { + return view, fmt.Errorf("failed to create workdir: %w", err) + } + + // Step 4: Copy build context + if buildContext != "" { + fmt.Fprintf(a.IO.Out, "\n๐Ÿ“ฆ Copying build context from: %s\n", buildContext) + if err := copyBuildContext(ctx, sshClient, buildContext, workdir, df, a.IO.Out, a.IO.Err); err != nil { + return view, fmt.Errorf("failed to copy build context: %w", err) + } + } + + // Step 5: Set environment variables + if len(df.Env) > 0 { + fmt.Fprintln(a.IO.Out, "\n๐Ÿ”ง Setting environment variables...") + if err := setupEnvironment(ctx, sshClient, df.Env, req.BuildArgs, a.IO.Err); err != nil { + return view, fmt.Errorf("failed to set environment: %w", err) + } 
+ } + + // Step 6: Execute RUN commands + runCommands := df.GetRunCommands() + if len(runCommands) > 0 { + fmt.Fprintf(a.IO.Out, "\n๐Ÿ”จ Executing %d build steps...\n", len(runCommands)) + for i, cmd := range runCommands { + fmt.Fprintf(a.IO.Out, " [%d/%d] %s\n", i+1, len(runCommands), truncateString(cmd, 60)) + fullCmd := fmt.Sprintf("cd %s && %s", workdir, cmd) + if err := sshClient.Execute(ctx, fullCmd, a.IO.Out, a.IO.Err); err != nil { + return view, fmt.Errorf("build step %d failed: %w", i+1, err) + } + } + } + view.TotalSteps = len(runCommands) + + // Step 7: Create commit (snapshot) + fmt.Fprintln(a.IO.Out, "\n๐Ÿ“ธ Creating snapshot...") + + commitResp, err := a.Client.Vm.Commit(ctx, view.VMID, vers.VmCommitParams{}) + if err != nil { + return view, fmt.Errorf("failed to create commit: %w", err) + } + + view.CommitID = commitResp.CommitID + fmt.Fprintf(a.IO.Out, " Commit: %s\n", view.CommitID) + + // Save tag as alias for the commit if provided + if req.Tag != "" { + // Store tag -> commit mapping in aliases + if err := utils.SetAlias(fmt.Sprintf("image:%s", req.Tag), view.CommitID); err != nil { + fmt.Fprintf(a.IO.Err, "Warning: could not save image tag: %v\n", err) + } + view.Tag = req.Tag + } + + // Clean up: delete the build VM (keep only the commit) + fmt.Fprintln(a.IO.Out, "\n๐Ÿงน Cleaning up build VM...") + if _, err := a.Client.Vm.Delete(ctx, view.VMID, vers.VmDeleteParams{}); err != nil { + fmt.Fprintf(a.IO.Err, "Warning: could not delete build VM: %v\n", err) + } + + return view, nil +} + +// waitForSSH waits for SSH to become available +func waitForSSH(ctx context.Context, client *sshutil.Client, stdout io.Writer) error { + maxAttempts := 60 + for i := 0; i < maxAttempts; i++ { + err := client.Execute(ctx, "echo ready", io.Discard, io.Discard) + if err == nil { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + fmt.Fprint(stdout, ".") + } + } + fmt.Fprintln(stdout) + return 
fmt.Errorf("timeout waiting for VM SSH") +} + +// copyBuildContext copies files based on COPY instructions +func copyBuildContext(ctx context.Context, client *sshutil.Client, buildContext, workdir string, df *docker.Dockerfile, stdout, stderr io.Writer) error { + copies := df.GetCopyInstructions() + + if len(copies) == 0 { + fmt.Fprintln(stdout, " Copying entire build context...") + return client.Upload(ctx, buildContext, workdir, true) + } + + for _, copy := range copies { + if len(copy.Args) < 2 { + continue + } + src := copy.Args[0] + dst := copy.Args[len(copy.Args)-1] + + srcPath := filepath.Join(buildContext, src) + dstPath := dst + if !filepath.IsAbs(dst) { + dstPath = filepath.Join(workdir, dst) + } + + fmt.Fprintf(stdout, " COPY %s -> %s\n", src, dstPath) + + info, err := os.Stat(srcPath) + if err != nil { + // Try glob + matches, _ := filepath.Glob(srcPath) + if len(matches) == 0 { + return fmt.Errorf("source not found: %s", src) + } + for _, m := range matches { + mInfo, _ := os.Stat(m) + if err := client.Upload(ctx, m, filepath.Join(dstPath, filepath.Base(m)), mInfo != nil && mInfo.IsDir()); err != nil { + return err + } + } + continue + } + + if err := client.Upload(ctx, srcPath, dstPath, info.IsDir()); err != nil { + return err + } + } + + return nil +} + +// setupEnvironment sets environment variables in /etc/environment +func setupEnvironment(ctx context.Context, client *sshutil.Client, env map[string]string, additional []string, stderr io.Writer) error { + var envLines []string + + for key, value := range env { + envLines = append(envLines, fmt.Sprintf("%s=%q", key, value)) + } + + for _, envVar := range additional { + envLines = append(envLines, envVar) + } + + if len(envLines) == 0 { + return nil + } + + cmd := fmt.Sprintf("cat >> /etc/environment << 'EOF'\n%s\nEOF", strings.Join(envLines, "\n")) + return client.Execute(ctx, cmd, io.Discard, stderr) +} + +// truncateString truncates a string with ellipsis +func truncateString(s string, maxLen int) 
string { + if len(s) <= maxLen { + return s + } + return s[:maxLen-3] + "..." +} diff --git a/internal/handlers/docker_compose.go b/internal/handlers/docker_compose.go new file mode 100644 index 0000000..b4ca598 --- /dev/null +++ b/internal/handlers/docker_compose.go @@ -0,0 +1,117 @@ +package handlers + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/hdresearch/vers-cli/internal/app" + "github.com/hdresearch/vers-cli/internal/docker" + "github.com/hdresearch/vers-cli/internal/presenters" +) + +// DockerComposeUpReq contains the request parameters for docker compose up +type DockerComposeUpReq struct { + ComposePath string // Path to docker-compose.yml + ProjectName string // Project name (defaults to directory name) + Detach bool // Run in detached mode + Services []string // Specific services to run (empty = all) + NoDeps bool // Don't start dependencies + EnvVars []string // Additional environment variables +} + +// HandleDockerComposeUp handles the vers docker compose up command +func HandleDockerComposeUp(ctx context.Context, a *app.App, req DockerComposeUpReq) (presenters.DockerComposeView, error) { + view := presenters.DockerComposeView{} + + // Find compose file + composePath := req.ComposePath + if composePath == "" { + // Look for common compose file names + candidates := []string{ + "docker-compose.yml", + "docker-compose.yaml", + "compose.yml", + "compose.yaml", + } + for _, c := range candidates { + if _, err := os.Stat(c); err == nil { + composePath = c + break + } + } + if composePath == "" { + return view, fmt.Errorf("no compose file found (tried docker-compose.yml, compose.yml)") + } + } + + // Make path absolute + if !filepath.IsAbs(composePath) { + cwd, err := os.Getwd() + if err != nil { + return view, fmt.Errorf("failed to get current directory: %w", err) + } + composePath = filepath.Join(cwd, composePath) + } + + // Check if file exists + if _, err := os.Stat(composePath); os.IsNotExist(err) { + return view, 
		return view, fmt.Errorf("compose file not found: %s", composePath)
	}

	// Determine project name: explicit request wins, else the compose file's
	// parent directory name (mirrors docker compose defaults).
	projectName := req.ProjectName
	if projectName == "" {
		projectName = filepath.Base(filepath.Dir(composePath))
	}

	// Create executor and run
	executor := docker.NewComposeExecutor(a)

	cfg := docker.ComposeConfig{
		ComposePath: composePath,
		ProjectName: projectName,
		Detach:      req.Detach,
		Services:    req.Services,
		NoDeps:      req.NoDeps,
		EnvVars:     req.EnvVars,
	}

	result, err := executor.Up(ctx, cfg, a.IO.Out, a.IO.Err)
	if err != nil {
		return view, err
	}

	// Convert result to view (per-service errors are flattened to strings
	// so the presenter layer stays error-type agnostic).
	view.ProjectName = result.ProjectName
	view.TotalServices = result.TotalServices

	for _, svc := range result.Services {
		svcView := presenters.ComposeServiceView{
			Name:          svc.Name,
			VMID:          svc.VMID,
			VMAlias:       svc.VMAlias,
			Ports:         svc.Ports,
			Running:       svc.Running,
			SetupComplete: svc.SetupComplete,
		}
		if svc.Error != nil {
			svcView.Error = svc.Error.Error()
		}
		view.Services = append(view.Services, svcView)
	}

	return view, nil
}

// DockerComposePsReq contains the request parameters for docker compose ps
type DockerComposePsReq struct {
	ProjectName string
}

// DockerComposeDownReq contains the request parameters for docker compose down
type DockerComposeDownReq struct {
	ProjectName string
	RemoveAll   bool // Also remove volumes
}
diff --git a/internal/handlers/docker_run.go b/internal/handlers/docker_run.go
new file mode 100644
index 0000000..88dc4db
--- /dev/null
+++ b/internal/handlers/docker_run.go
@@ -0,0 +1,111 @@
package handlers

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/hdresearch/vers-cli/internal/app"
	"github.com/hdresearch/vers-cli/internal/docker"
	"github.com/hdresearch/vers-cli/internal/presenters"
)

// DockerRunReq contains the request parameters for docker run
type DockerRunReq struct {
	DockerfilePath string
	BuildContext   string
	MemSizeMib     int64
	VcpuCount      int64
	FsSizeMib      int64
	VMAlias        string
	Detach         bool
	PortMappings   []string
	EnvVars        []string
	Interactive    bool
}

// HandleDockerRun handles the vers docker run command: it normalizes the
// Dockerfile/context paths, applies resource defaults, and delegates the
// actual build-and-run to docker.Executor.
func HandleDockerRun(ctx context.Context, a *app.App, req DockerRunReq) (presenters.DockerRunView, error) {
	view := presenters.DockerRunView{}

	// Validate and normalize paths
	dockerfilePath := req.DockerfilePath
	if dockerfilePath == "" {
		dockerfilePath = "Dockerfile"
	}

	// Make path absolute
	if !filepath.IsAbs(dockerfilePath) {
		cwd, err := os.Getwd()
		if err != nil {
			return view, fmt.Errorf("failed to get current directory: %w", err)
		}
		dockerfilePath = filepath.Join(cwd, dockerfilePath)
	}

	// Check if Dockerfile exists
	if _, err := os.Stat(dockerfilePath); os.IsNotExist(err) {
		return view, fmt.Errorf("Dockerfile not found: %s", dockerfilePath)
	}

	// Set default build context to Dockerfile directory
	buildContext := req.BuildContext
	if buildContext == "" {
		buildContext = filepath.Dir(dockerfilePath)
	}
	if !filepath.IsAbs(buildContext) {
		cwd, err := os.Getwd()
		if err != nil {
			return view, fmt.Errorf("failed to get current directory: %w", err)
		}
		buildContext = filepath.Join(cwd, buildContext)
	}

	// Set default VM resources
	memSize := req.MemSizeMib
	if memSize == 0 {
		memSize = 1024 // 1GB default
	}

	vcpuCount := req.VcpuCount
	if vcpuCount == 0 {
		vcpuCount = 2 // 2 vCPUs default
	}

	fsSize := req.FsSizeMib
	if fsSize == 0 {
		fsSize = 4096 // 4GB default
	}

	// Create executor and run
	executor := docker.NewExecutor(a)

	cfg := docker.RunConfig{
		DockerfilePath: dockerfilePath,
		BuildContext:   buildContext,
		MemSizeMib:     memSize,
		VcpuCount:      vcpuCount,
		FsSizeMib:      fsSize,
		VMAlias:        req.VMAlias,
		Detach:         req.Detach,
		PortMappings:   req.PortMappings,
		EnvVars:        req.EnvVars,
		Interactive:    req.Interactive,
	}

	result, err := executor.Run(ctx, cfg, a.IO.Out, a.IO.Err)
	if err != nil {
		return view, err
	}

	view.VMID = result.VMID
	view.VMAlias = result.VMAlias
	view.BaseImage = result.Dockerfile.BaseImage
	view.ExposedPorts = result.ExposedPorts
	view.StartCommand = result.StartCommand
	view.SetupComplete = result.SetupComplete
	view.Running = result.Running

	return view, nil
}
diff --git a/internal/presenters/docker_presenter.go b/internal/presenters/docker_presenter.go
new file mode 100644
index 0000000..1721ddb
--- /dev/null
+++ b/internal/presenters/docker_presenter.go
@@ -0,0 +1,191 @@
package presenters

import (
	"fmt"
	"strings"

	"github.com/hdresearch/vers-cli/internal/app"
)

// DockerRunView contains the result of a docker run command
type DockerRunView struct {
	VMID          string
	VMAlias       string
	BaseImage     string
	ExposedPorts  []string
	StartCommand  []string
	SetupComplete bool
	Running       bool
}

// RenderDockerRun renders the docker run result.
// NOTE(review): output goes to stdout via fmt; the *app.App parameter's IO
// writers are not used here, unlike the handlers — confirm this is intended.
func RenderDockerRun(a *app.App, v DockerRunView) {
	fmt.Println()
	fmt.Println("โœ… Dockerfile executed successfully on Vers VM")
	fmt.Println()

	// VM Info
	fmt.Printf(" VM ID: %s\n", v.VMID)
	if v.VMAlias != "" {
		fmt.Printf(" Alias: %s\n", v.VMAlias)
	}
	fmt.Printf(" Base: %s (translated to Vers VM)\n", v.BaseImage)

	// Ports
	if len(v.ExposedPorts) > 0 {
		fmt.Printf(" Ports: %s\n", strings.Join(v.ExposedPorts, ", "))
		fmt.Println()
		fmt.Println(" ๐ŸŒ To access exposed ports, use:")
		fmt.Printf(" vers tunnel %s \n", v.VMID)
	}

	// Status
	fmt.Println()
	if v.Running {
		fmt.Println(" Status: Application running in background")
		fmt.Println(" View logs: vers execute " + v.VMID + " cat /tmp/app.log")
	} else if v.SetupComplete {
		fmt.Println(" Status: Setup complete")
	}

	// Next steps
	fmt.Println()
	fmt.Println(" Next steps:")
	fmt.Printf(" vers connect %s # SSH into the VM\n", v.VMID)
	fmt.Printf(" vers kill %s # Stop the VM\n", v.VMID)
	if len(v.ExposedPorts) > 0 {
		fmt.Printf(" vers tunnel %s %s # Forward port\n", v.VMID, v.ExposedPorts[0])
	}
}
// DockerBuildView contains the result of a docker build command
type DockerBuildView struct {
	VMID       string
	CommitID   string
	BaseImage  string
	NumLayers  int
	TotalSteps int
	Tag        string
}

// RenderDockerBuild renders the docker build result.
// NOTE(review): prints to stdout via fmt; the *app.App parameter is unused
// here — confirm whether output should route through a.IO.Out instead.
func RenderDockerBuild(a *app.App, v DockerBuildView) {
	fmt.Println()
	fmt.Println("โœ… Docker image built as Vers snapshot")
	fmt.Println()
	fmt.Printf(" Commit ID: %s\n", v.CommitID)
	if v.Tag != "" {
		fmt.Printf(" Tag: %s\n", v.Tag)
	}
	fmt.Printf(" Base: %s\n", v.BaseImage)
	fmt.Printf(" Layers: %d steps executed\n", v.TotalSteps)
	fmt.Println()
	fmt.Println(" To run this image:")
	fmt.Printf(" vers checkout %s\n", v.CommitID)
	if v.Tag != "" {
		fmt.Println()
		fmt.Println(" Or by tag:")
		fmt.Printf(" vers checkout image:%s\n", v.Tag)
	}
}

// ComposeServiceView contains the result of starting a single compose service
type ComposeServiceView struct {
	Name          string
	VMID          string
	VMAlias       string
	Ports         []string
	Running       bool
	SetupComplete bool
	Error         string
}

// DockerComposeView contains the result of a docker compose up command
type DockerComposeView struct {
	ProjectName   string
	TotalServices int
	Services      []ComposeServiceView
}

// RenderDockerComposeUp renders the docker compose up result as a fixed-width
// table, followed by per-service errors, port-forwarding hints, and next
// steps. Names are truncated to 17 chars and VM IDs to 18 to fit the table.
func RenderDockerComposeUp(a *app.App, v DockerComposeView) {
	fmt.Println()
	fmt.Printf("โœ… Docker Compose project '%s' started on Vers VMs\n", v.ProjectName)
	fmt.Println()

	// Count successful services
	successCount := 0
	for _, svc := range v.Services {
		if svc.Error == "" && svc.VMID != "" {
			successCount++
		}
	}

	fmt.Printf(" Services: %d/%d running\n", successCount, v.TotalServices)
	fmt.Println()

	// List services
	fmt.Println(" โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”")
	fmt.Println(" โ”‚ Service โ”‚ VM ID โ”‚ Status โ”‚")
	fmt.Println(" โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค")

	for _, svc := range v.Services {
		// Status precedence: error beats everything; Running beats Ready.
		status := "โŒ Failed"
		if svc.Error == "" {
			if svc.Running {
				status = "โ–ถ๏ธ Running"
			} else if svc.SetupComplete {
				status = "โœ… Ready"
			}
		}

		vmID := svc.VMID
		if len(vmID) > 18 {
			vmID = vmID[:15] + "..."
		}
		name := svc.Name
		if len(name) > 17 {
			name = name[:14] + "..."
		}

		fmt.Printf(" โ”‚ %-17s โ”‚ %-18s โ”‚ %-18s โ”‚\n", name, vmID, status)
	}

	fmt.Println(" โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜")

	// Show errors if any
	hasErrors := false
	for _, svc := range v.Services {
		if svc.Error != "" {
			if !hasErrors {
				fmt.Println()
				fmt.Println(" Errors:")
				hasErrors = true
			}
			fmt.Printf(" - %s: %s\n", svc.Name, svc.Error)
		}
	}

	// Show exposed ports
	hasPorts := false
	for _, svc := range v.Services {
		if len(svc.Ports) > 0 && svc.Error == "" {
			if !hasPorts {
				fmt.Println()
				fmt.Println(" ๐ŸŒ To access exposed ports:")
				hasPorts = true
			}
			for _, port := range svc.Ports {
				fmt.Printf(" vers tunnel %s %s # %s\n", svc.VMAlias, port, svc.Name)
			}
		}
	}

	// Next steps
	fmt.Println()
	fmt.Println(" Next steps:")
	if len(v.Services) > 0 && v.Services[0].VMAlias != "" {
		fmt.Printf(" vers connect %s # SSH into a service\n", v.Services[0].VMAlias)
	}
	fmt.Printf(" vers status # List all VMs\n")
}