diff --git a/cmd/cmd.go b/cmd/cmd.go index 014471f763..d8a8c09f04 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -17,6 +17,7 @@ import ( "github.com/databricks/cli/cmd/experimental" "github.com/databricks/cli/cmd/fs" "github.com/databricks/cli/cmd/labs" + "github.com/databricks/cli/cmd/lakebox" "github.com/databricks/cli/cmd/pipelines" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/cmd/selftest" @@ -103,6 +104,7 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(configure.New()) cli.AddCommand(fs.New()) cli.AddCommand(labs.New(ctx)) + cli.AddCommand(lakebox.New()) cli.AddCommand(sync.New()) cli.AddCommand(version.New()) cli.AddCommand(selftest.New()) diff --git a/cmd/fuzz_panic_test.go b/cmd/fuzz_panic_test.go index 4fb5d5b9d3..e4037b4ef8 100644 --- a/cmd/fuzz_panic_test.go +++ b/cmd/fuzz_panic_test.go @@ -208,6 +208,7 @@ func isAutoGenerated(leaf leafCommand) bool { "configure": true, "experimental": true, "labs": true, + "lakebox": true, "pipelines": true, "psql": true, "selftest": true, diff --git a/cmd/lakebox/api.go b/cmd/lakebox/api.go new file mode 100644 index 0000000000..754da218ec --- /dev/null +++ b/cmd/lakebox/api.go @@ -0,0 +1,242 @@ +package lakebox + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/client" +) + +// Sandboxes live under the `/sandboxes` sub-collection of the lakebox service +// namespace (see `lakebox.proto` `LakeboxService.CreateSandbox`). +const lakeboxAPIPath = "/api/2.0/lakebox/sandboxes" + +// SSH keys are nested under the lakebox service namespace alongside +// `sandboxes/` (see `LakeboxService.CreateSshKey`). +const lakeboxKeysAPIPath = "/api/2.0/lakebox/ssh-keys" + +// orgIDHeader is sent by multi-workspace gateways (e.g. dogfood staging) so +// the gateway can scope the credential to a specific workspace. 
Without it, +// requests fail with "Credential was not sent or was of an unsupported type +// for this API." +const orgIDHeader = "X-Databricks-Org-Id" + +// lakeboxAPI wraps the SDK ApiClient with workspace-id-aware request headers. +type lakeboxAPI struct { + c *client.DatabricksClient +} + +// createRequest is the JSON body for POST /api/2.0/lakebox/sandboxes. +// +// The proto-defined `CreateSandboxRequest` carries a `Sandbox sandbox = 1` +// field today (every member is server-chosen), but JSON transcoding accepts +// the unwrapped form for forward-compatible callers. Keep `public_key` here +// as a no-op compat shim so older `lakebox create --public-key-file=...` +// invocations don't error — the manager ignores it on the wire. +type createRequest struct { + PublicKey string `json:"public_key,omitempty"` +} + +// createResponse is the JSON body returned by POST /api/2.0/lakebox/sandboxes. +// Mirrors the `Sandbox` proto message after JSON transcoding. +type createResponse struct { + SandboxID string `json:"sandboxId"` + Status string `json:"status"` + FQDN string `json:"fqdn"` +} + +// sandboxEntry is a single item in the list response. +// Mirrors the `Sandbox` proto message after JSON transcoding. +// +// IdleTimeout and NoAutostop correspond to the proto's `optional` fields; +// they're pointers so we can tell "field absent on the wire" (server has +// the global default) from "explicitly set to 0 / false." +// +// `IdleTimeout` is a `google.protobuf.Duration`. Proto3 JSON canonical +// form serializes Duration as a string with an `s` suffix (e.g. +// `"900s"`), so the Go field is `*string` and we parse on read. +type sandboxEntry struct { + SandboxID string `json:"sandboxId"` + Status string `json:"status"` + FQDN string `json:"fqdn"` + IdleTimeout *string `json:"idleTimeout,omitempty"` + NoAutostop *bool `json:"noAutostop,omitempty"` +} + +// idleTimeoutSecs parses the proto3-canonical Duration string off +// `IdleTimeout` (e.g. `"900s"` → `900`). 
Returns 0 when unset or when +// the string is not a recognizable Duration. Sub-second precision is +// dropped — the watchdog only acts on whole seconds. +func (e *sandboxEntry) idleTimeoutSecs() int64 { + if e.IdleTimeout == nil { + return 0 + } + s := *e.IdleTimeout + if !strings.HasSuffix(s, "s") { + return 0 + } + d, err := time.ParseDuration(s) + if err != nil { + return 0 + } + return int64(d.Seconds()) +} + +// defaultAutoStopSecs mirrors the manager's `watchdog_idle_grace_secs` +// fallback (10 minutes) used when a sandbox has no per-record override. +// The value is also documented in `lakebox/CLAUDE.md` ("Sandbox +// Watchdog" section). Hardcoded here so list/status can render the +// effective timeout without an extra round-trip to fetch manager config. +const defaultAutoStopSecs int64 = 600 + +// autoStopLabel renders the auto-stop policy advertised by the manager +// for one sandbox into a short human-readable string. Mirrors the wire +// semantics from `lakebox/proto/lakebox.proto`: +// - `no_autostop == true` → never auto-stops +// - `idle_timeout` set and positive → that many seconds +// - otherwise → manager's global default (`defaultAutoStopSecs`) +func (e *sandboxEntry) autoStopLabel() string { + if e.NoAutostop != nil && *e.NoAutostop { + return "never" + } + if secs := e.idleTimeoutSecs(); secs > 0 { + return formatDurationSecs(secs) + } + return formatDurationSecs(defaultAutoStopSecs) +} + +// formatDurationSecs prints `secs` as a compact duration (e.g. `90s`, +// `15m`, `2h`, `1h30m`). Falls back to seconds if it's not a clean +// minute/hour multiple. Avoids pulling in a dependency just for this. 
+func formatDurationSecs(secs int64) string { + if secs < 60 { + return fmt.Sprintf("%ds", secs) + } + if secs%3600 == 0 { + return fmt.Sprintf("%dh", secs/3600) + } + if secs >= 3600 { + return fmt.Sprintf("%dh%dm", secs/3600, (secs%3600)/60) + } + if secs%60 == 0 { + return fmt.Sprintf("%dm", secs/60) + } + return fmt.Sprintf("%ds", secs) +} + +// listResponse is the JSON body returned by GET /api/2.0/lakebox/sandboxes. +type listResponse struct { + Sandboxes []sandboxEntry `json:"sandboxes"` +} + +// updateBody is the PATCH request body. The proto declares +// `UpdateSandboxRequest { Sandbox sandbox = 1 }` with `body: "sandbox"` +// in the (google.api.http) annotation, so the HTTP body is the inner +// `Sandbox` message directly — there is no `{"sandbox": {...}}` +// wrapping on the wire. +// +// Pointer fields encode the proto3 `optional` semantics — only the +// fields we explicitly set are emitted, leaving everything else +// server-untouched. `IdleTimeout` is a proto3-canonical Duration +// string (e.g. `"900s"`); the server-side wire type is +// `google.protobuf.Duration`. +type updateBody struct { + SandboxID string `json:"sandbox_id"` + IdleTimeout *string `json:"idle_timeout,omitempty"` + NoAutostop *bool `json:"no_autostop,omitempty"` +} + +// registerKeyRequest is the JSON body for POST /api/2.0/lakebox/ssh-keys. +type registerKeyRequest struct { + PublicKey string `json:"public_key"` + Name string `json:"name,omitempty"` +} + +func newLakeboxAPI(w *databricks.WorkspaceClient) (*lakeboxAPI, error) { + c, err := client.New(w.Config) + if err != nil { + return nil, fmt.Errorf("failed to create lakebox API client: %w", err) + } + return &lakeboxAPI{c: c}, nil +} + +// headers attaches the workspace routing identifier so multi-workspace +// gateways (e.g. SPOG hosts) can scope the credential. Mirrors the pattern +// in libs/telemetry, libs/filer, and SDK-generated workspace services. 
The +// auth.WorkspaceIDNone sentinel ("none") is treated as unset so the literal +// string never goes on the wire. +func (a *lakeboxAPI) headers() map[string]string { + wsID := a.c.Config.WorkspaceID + if wsID == "" || wsID == auth.WorkspaceIDNone { + return nil + } + return map[string]string{orgIDHeader: wsID} +} + +// create calls POST /api/2.0/lakebox/sandboxes with an optional public key. +func (a *lakeboxAPI) create(ctx context.Context, publicKey string) (*createResponse, error) { + var resp createResponse + err := a.c.Do(ctx, http.MethodPost, lakeboxAPIPath, a.headers(), nil, createRequest{PublicKey: publicKey}, &resp) + if err != nil { + return nil, err + } + return &resp, nil +} + +// list calls GET /api/2.0/lakebox/sandboxes. +func (a *lakeboxAPI) list(ctx context.Context) ([]sandboxEntry, error) { + var resp listResponse + err := a.c.Do(ctx, http.MethodGet, lakeboxAPIPath, a.headers(), nil, nil, &resp) + if err != nil { + return nil, err + } + return resp.Sandboxes, nil +} + +// get calls GET /api/2.0/lakebox/sandboxes/{id}. +func (a *lakeboxAPI) get(ctx context.Context, id string) (*sandboxEntry, error) { + var resp sandboxEntry + err := a.c.Do(ctx, http.MethodGet, lakeboxAPIPath+"/"+id, a.headers(), nil, nil, &resp) + if err != nil { + return nil, err + } + return &resp, nil +} + +// update calls PATCH /api/2.0/lakebox/sandboxes/{id} with whichever of +// `idle_timeout` / `no_autostop` the caller chose to set. Fields left +// nil are omitted from the wire payload, so the server preserves their +// current values. Returns the refreshed `sandboxEntry`. 
+func (a *lakeboxAPI) update(ctx context.Context, id string, idleTimeoutSecs *int64, noAutostop *bool) (*sandboxEntry, error) {
+ var idleTimeout *string
+ if idleTimeoutSecs != nil {
+ s := fmt.Sprintf("%ds", *idleTimeoutSecs)
+ idleTimeout = &s
+ }
+ body := updateBody{
+ SandboxID: id,
+ IdleTimeout: idleTimeout,
+ NoAutostop: noAutostop,
+ }
+ var resp sandboxEntry
+ err := a.c.Do(ctx, http.MethodPatch, lakeboxAPIPath+"/"+id, a.headers(), nil, body, &resp)
+ if err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// delete calls DELETE /api/2.0/lakebox/sandboxes/{id}.
+func (a *lakeboxAPI) delete(ctx context.Context, id string) error {
+ return a.c.Do(ctx, http.MethodDelete, lakeboxAPIPath+"/"+id, a.headers(), nil, nil, nil)
+}
+
+// registerKey calls POST /api/2.0/lakebox/ssh-keys.
+func (a *lakeboxAPI) registerKey(ctx context.Context, publicKey string) error {
+ return a.c.Do(ctx, http.MethodPost, lakeboxKeysAPIPath, a.headers(), nil, registerKeyRequest{PublicKey: publicKey}, nil)
+}
diff --git a/cmd/lakebox/config.go b/cmd/lakebox/config.go
new file mode 100644
index 0000000000..2861930cc6
--- /dev/null
+++ b/cmd/lakebox/config.go
@@ -0,0 +1,136 @@
+package lakebox
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/databricks/cli/cmd/root"
+ "github.com/databricks/cli/libs/cmdctx"
+ "github.com/databricks/cli/libs/cmdio"
+ "github.com/spf13/cobra"
+)
+
+// MIN_IDLE_TIMEOUT_SECS / MAX_IDLE_TIMEOUT_SECS mirror the manager-side
+// constants in lakebox/src/api/handlers/sandbox.rs. Pre-flighting client-side
+// gives a clearer error than waiting for the server's INVALID_ARGUMENT.
+const (
+ minIdleTimeoutSecs = 60
+ maxIdleTimeoutSecs = 86_400
+)
+
+func newConfigCommand() *cobra.Command {
+ var idleTimeoutFlag string
+ var noAutostopFlag bool
+
+ cmd := &cobra.Command{
+ Use: "config <lakebox-id>",
+ Short: "Configure a Lakebox's auto-stop policy",
+ Long: `Configure a Lakebox's auto-stop policy. 
+ +Two knobs are independent — pass either or both: + + --idle-timeout Per-sandbox idle timeout. The watchdog reaps + the sandbox after this much idle time. Pass + 0 (or 0s) to clear and revert to the manager's + global default (10m). Valid range when set: + 60s to 24h. + + --no-autostop[=true|false] When true, the sandbox is exempt from + idle-driven auto-stop entirely. The + --idle-timeout setting is ignored while + this is on. Setting --idle-timeout to a + non-zero value in a later call clears + --no-autostop automatically. Sandbox still + stops on explicit 'databricks lakebox delete'. + +Examples: + databricks lakebox config happy-panda-1234 --idle-timeout 15m + databricks lakebox config happy-panda-1234 --idle-timeout 1h30m + databricks lakebox config happy-panda-1234 --idle-timeout 0 # clear, use default + databricks lakebox config happy-panda-1234 --no-autostop # never auto-stop + databricks lakebox config happy-panda-1234 --no-autostop=false # back to timeout path + databricks lakebox config happy-panda-1234 --idle-timeout 30m --no-autostop=false`, + Args: cobra.ExactArgs(1), + PreRunE: root.MustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api, err := newLakeboxAPI(w) + if err != nil { + return err + } + out := cmd.OutOrStdout() + + id := args[0] + + // Translate flag presence + value into the proto3 + // optional-field semantics the server expects. 
+ var idleSecs *int64 + if cmd.Flags().Changed("idle-timeout") { + secs, err := parseIdleTimeoutFlag(idleTimeoutFlag) + if err != nil { + return err + } + idleSecs = &secs + } + + var noAutostop *bool + if cmd.Flags().Changed("no-autostop") { + p := noAutostopFlag + noAutostop = &p + } + + if idleSecs == nil && noAutostop == nil { + return errors.New("nothing to update — pass --idle-timeout and/or --no-autostop") + } + + updated, err := api.update(ctx, id, idleSecs, noAutostop) + if err != nil { + return fmt.Errorf("failed to update lakebox %s: %w", id, err) + } + + blank(out) + field(ctx, out, "id", cmdio.Bold(ctx, updated.SandboxID)) + field(ctx, out, "autostop", cmdio.Dim(ctx, updated.autoStopLabel())) + blank(out) + return nil + }, + } + + cmd.Flags().StringVar(&idleTimeoutFlag, "idle-timeout", "", + "Idle timeout (e.g. 15m, 1h30m, 90s). Pass 0 to clear and revert to the manager's default.") + cmd.Flags().BoolVar(&noAutostopFlag, "no-autostop", false, + "When true, this sandbox never auto-stops on idle. Pass --no-autostop=false to revert.") + + return cmd +} + +// parseIdleTimeoutFlag accepts the same syntax as time.ParseDuration plus +// the special-case "0" / "0s" → clear. Anything else outside the +// [60s, 86400s] window is rejected client-side. +func parseIdleTimeoutFlag(raw string) (int64, error) { + d, err := time.ParseDuration(raw) + if err != nil { + // Allow bare integer seconds as a convenience (`--idle-timeout 900`). + var secs int64 + if _, e2 := fmt.Sscanf(raw, "%d", &secs); e2 == nil { + return checkIdleSecs(secs) + } + return 0, fmt.Errorf("invalid --idle-timeout %q: %w (use Go duration syntax, e.g. 
15m, 1h30m)", raw, err) + } + return checkIdleSecs(int64(d.Seconds())) +} + +func checkIdleSecs(secs int64) (int64, error) { + if secs == 0 { + return 0, nil // clear / revert to global default + } + if secs < minIdleTimeoutSecs || secs > maxIdleTimeoutSecs { + return 0, fmt.Errorf( + "idle-timeout must be 0 (clear) or between %ds and %ds, got %ds", + minIdleTimeoutSecs, maxIdleTimeoutSecs, secs, + ) + } + return secs, nil +} diff --git a/cmd/lakebox/create.go b/cmd/lakebox/create.go new file mode 100644 index 0000000000..ea5c47cac6 --- /dev/null +++ b/cmd/lakebox/create.go @@ -0,0 +1,84 @@ +package lakebox + +import ( + "fmt" + "os" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +func newCreateCommand() *cobra.Command { + var publicKeyFile string + + cmd := &cobra.Command{ + Use: "create", + Short: "Create a new Lakebox environment", + Long: `Create a new Lakebox environment. + +Creates a new personal development environment backed by a microVM. +Blocks until the lakebox is running and prints the lakebox ID. 
+ +Example: + databricks lakebox create`, + PreRunE: root.MustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api, err := newLakeboxAPI(w) + if err != nil { + return err + } + + var publicKey string + if publicKeyFile != "" { + data, err := os.ReadFile(publicKeyFile) + if err != nil { + return fmt.Errorf("failed to read public key file %s: %w", publicKeyFile, err) + } + publicKey = string(data) + } + + s := spin(ctx, "Provisioning your lakebox…") + defer s.Close() + + result, err := api.create(ctx, publicKey) + if err != nil { + s.fail("Failed to create lakebox") + return fmt.Errorf("failed to create lakebox: %w", err) + } + + s.ok("Lakebox " + cmdio.Bold(ctx, result.SandboxID) + " is " + status(ctx, result.Status)) + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + + currentDefault := getDefault(ctx, profile) + shouldSetDefault := currentDefault == "" + if !shouldSetDefault && currentDefault != "" { + if _, err := api.get(ctx, currentDefault); err != nil { + shouldSetDefault = true + } + } + if shouldSetDefault { + if err := setDefault(ctx, profile, result.SandboxID); err != nil { + warn(ctx, fmt.Sprintf("Could not save default: %v", err)) + } else { + field(ctx, cmd.ErrOrStderr(), "default", result.SandboxID) + } + } + + blank(cmd.ErrOrStderr()) + fmt.Fprintln(cmd.OutOrStdout(), result.SandboxID) + return nil + }, + } + + cmd.Flags().StringVar(&publicKeyFile, "public-key-file", "", "Path to SSH public key file to install in the lakebox") + + return cmd +} diff --git a/cmd/lakebox/default.go b/cmd/lakebox/default.go new file mode 100644 index 0000000000..cd96df172d --- /dev/null +++ b/cmd/lakebox/default.go @@ -0,0 +1,40 @@ +package lakebox + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/spf13/cobra" +) + +func newSetDefaultCommand() *cobra.Command { + cmd := &cobra.Command{ + 
Use: "set-default <lakebox-id>",
+ Short: "Set the default Lakebox for SSH",
+ Long: `Set the default Lakebox that 'databricks lakebox ssh' connects to.
+
+The default is stored locally in ~/.databricks/lakebox.json per profile.
+
+Example:
+ databricks lakebox set-default happy-panda-1234`,
+ Args: cobra.ExactArgs(1),
+ PreRunE: root.MustWorkspaceClient,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx := cmd.Context()
+ w := cmdctx.WorkspaceClient(ctx)
+ profile := w.Config.Profile
+ if profile == "" {
+ profile = w.Config.Host
+ }
+
+ lakeboxID := args[0]
+ if err := setDefault(ctx, profile, lakeboxID); err != nil {
+ return fmt.Errorf("failed to set default: %w", err)
+ }
+ fmt.Fprintf(cmd.OutOrStdout(), "Default lakebox set to: %s\n", lakeboxID)
+ return nil
+ },
+ }
+ return cmd
+}
diff --git a/cmd/lakebox/delete.go b/cmd/lakebox/delete.go
new file mode 100644
index 0000000000..001a382252
--- /dev/null
+++ b/cmd/lakebox/delete.go
@@ -0,0 +1,56 @@
+package lakebox
+
+import (
+ "fmt"
+
+ "github.com/databricks/cli/cmd/root"
+ "github.com/databricks/cli/libs/cmdctx"
+ "github.com/databricks/cli/libs/cmdio"
+ "github.com/spf13/cobra"
+)
+
+func newDeleteCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "delete <lakebox-id>",
+ Short: "Delete a Lakebox environment",
+ Long: `Delete a Lakebox environment.
+
+Permanently terminates and removes the specified lakebox. 
+
+Example:
+ databricks lakebox delete happy-panda-1234`,
+ Args: cobra.ExactArgs(1),
+ PreRunE: root.MustWorkspaceClient,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx := cmd.Context()
+ w := cmdctx.WorkspaceClient(ctx)
+ api, err := newLakeboxAPI(w)
+ if err != nil {
+ return err
+ }
+
+ lakeboxID := args[0]
+ s := spin(ctx, "Removing "+lakeboxID+"…")
+ defer s.Close()
+
+ if err := api.delete(ctx, lakeboxID); err != nil {
+ s.fail("Failed to delete " + lakeboxID)
+ return fmt.Errorf("failed to delete lakebox %s: %w", lakeboxID, err)
+ }
+
+ profile := w.Config.Profile
+ if profile == "" {
+ profile = w.Config.Host
+ }
+ if getDefault(ctx, profile) == lakeboxID {
+ _ = clearDefault(ctx, profile)
+ s.ok("Removed " + cmdio.Bold(ctx, lakeboxID) + " " + cmdio.Dim(ctx, "(default cleared)"))
+ } else {
+ s.ok("Removed " + cmdio.Bold(ctx, lakeboxID))
+ }
+ return nil
+ },
+ }
+
+ return cmd
+}
diff --git a/cmd/lakebox/keyhash.go b/cmd/lakebox/keyhash.go
new file mode 100644
index 0000000000..7f4fcd0bd4
--- /dev/null
+++ b/cmd/lakebox/keyhash.go
@@ -0,0 +1,33 @@
+package lakebox
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+)
+
+// keyHash returns the identifier the lakebox SSH-keys API assigns to a
+// public key. The algorithm is sha256("<type> <blob>") truncated to
+// the first 16 bytes and hex-encoded; the OpenSSH comment (anything after
+// the second whitespace-separated token) is stripped before hashing, so
+// registering the same key under different comments yields the same hash.
+// Inputs that don't have a second token are hashed as-is.
+//
+// Useful for matching a locally-known key against entries in a
+// GET /ssh-keys listing without sending the key contents back to the
+// server.
+func keyHash(publicKey string) string {
+ // Slice off the OpenSSH comment by stopping at the second space. 
+ end := len(publicKey) + spaces := 0 + for i, c := range publicKey { + if c == ' ' { + spaces++ + if spaces == 2 { + end = i + break + } + } + } + sum := sha256.Sum256([]byte(publicKey[:end])) + return hex.EncodeToString(sum[:16]) +} diff --git a/cmd/lakebox/keyhash_test.go b/cmd/lakebox/keyhash_test.go new file mode 100644 index 0000000000..638f1d8f34 --- /dev/null +++ b/cmd/lakebox/keyhash_test.go @@ -0,0 +1,55 @@ +package lakebox + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Inputs are synthetic; expected values are sha256(canonical input)[:16] +// in hex. The algorithm was verified against the live +// /api/2.0/lakebox/ssh-keys endpoint during exploration, so this test +// pins the algorithm — not a known set of real registered keys. +func TestKeyHash(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "single-token input hashed verbatim", + input: "a", + want: "ca978112ca1bbdcafac231b39a23dc4d", + }, + { + name: "type and blob with no comment", + input: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUMMY", + want: "2b366430eb9743668b652921d3b22d54", + }, + { + name: "comment is stripped before hashing", + input: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUMMY comment-one", + want: "2b366430eb9743668b652921d3b22d54", + }, + { + name: "different comment same key still matches", + input: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUMMY entirely-different-comment", + want: "2b366430eb9743668b652921d3b22d54", + }, + { + name: "longer key with multi-word comment", + input: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITESTKEY1234 test-from-cli-exploration", + want: "52c927705154e2d98a1b7036cc3e06dc", + }, + { + name: "empty input still produces a hash", + input: "", + want: "e3b0c44298fc1c149afbf4c8996fb924", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.want, keyHash(tc.input)) + }) + } +} diff --git a/cmd/lakebox/lakebox.go b/cmd/lakebox/lakebox.go new file mode 
100644 index 0000000000..c4f7b6cc7e --- /dev/null +++ b/cmd/lakebox/lakebox.go @@ -0,0 +1,44 @@ +package lakebox + +import ( + "github.com/spf13/cobra" +) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "lakebox", + Short: "Manage Databricks Lakebox environments", + GroupID: "development", + Hidden: true, + Long: `Manage Databricks Lakebox environments. + +Lakebox provides SSH-accessible development environments backed by +microVM isolation. Each lakebox is a personal sandbox with pre-installed +tooling (Python, Node.js, Rust, Databricks CLI) and persistent storage. + +Getting started: + databricks auth login --host https://... # authenticate to a Databricks workspace + databricks lakebox register # generate and register an SSH key + databricks lakebox ssh # SSH to your default lakebox + +Common workflows: + databricks lakebox ssh # SSH to your default lakebox + databricks lakebox ssh my-project # SSH to a named lakebox + databricks lakebox list # list your lakeboxes + databricks lakebox create # create a new lakebox + databricks lakebox delete my-project # delete a lakebox + databricks lakebox status my-project # show lakebox status +`, + } + + cmd.AddCommand(newRegisterCommand()) + cmd.AddCommand(newSetDefaultCommand()) + cmd.AddCommand(newSSHCommand()) + cmd.AddCommand(newListCommand()) + cmd.AddCommand(newCreateCommand()) + cmd.AddCommand(newDeleteCommand()) + cmd.AddCommand(newStatusCommand()) + cmd.AddCommand(newConfigCommand()) + + return cmd +} diff --git a/cmd/lakebox/list.go b/cmd/lakebox/list.go new file mode 100644 index 0000000000..6dc1b42fb1 --- /dev/null +++ b/cmd/lakebox/list.go @@ -0,0 +1,113 @@ +package lakebox + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +func newListCommand() *cobra.Command { + var outputJSON bool + + cmd := &cobra.Command{ + Use: "list", + Short: "List 
your Lakebox environments", + Long: `List your Lakebox environments. + +Shows all lakeboxes associated with your account, including their +current status and ID. + +Example: + databricks lakebox list + databricks lakebox list --json`, + PreRunE: root.MustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api, err := newLakeboxAPI(w) + if err != nil { + return err + } + + entries, err := api.list(ctx) + if err != nil { + return fmt.Errorf("failed to list lakeboxes: %w", err) + } + + if outputJSON { + enc := json.NewEncoder(cmd.OutOrStdout()) + enc.SetIndent("", " ") + return enc.Encode(entries) + } + + if len(entries) == 0 { + fmt.Fprintf(cmd.ErrOrStderr(), " %s\n", cmdio.Dim(ctx, "No lakeboxes found.")) + return nil + } + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + defaultID := getDefault(ctx, profile) + + out := cmd.OutOrStdout() + + // Compute column widths. AUTOSTOP holds short tokens like + // `default`, `never`, `15m`, `1h30m` — 8 chars covers them. + col := 10 + autostopCol := 8 + for _, e := range entries { + if l := len(e.SandboxID); l > col { + col = l + } + if l := len(e.autoStopLabel()); l > autostopCol { + autostopCol = l + } + } + col += 2 + autostopCol += 2 + + blank(out) + header := fmt.Sprintf("%-*s %-10s %-*s %s", + col, "ID", "STATUS", autostopCol, "AUTOSTOP", "DEFAULT") + fmt.Fprintf(out, " %s\n", cmdio.Dim(ctx, header)) + fmt.Fprintf(out, " %s\n", cmdio.Dim(ctx, strings.Repeat("─", col+10+autostopCol+12))) + + for _, e := range entries { + id := e.SandboxID + def := "" + if id == defaultID { + def = cmdio.Cyan(ctx, "*") + } + // Pad ID manually so visible-width alignment is preserved + // after the helpers wrap each cell with ANSI escapes. 
+ idPad := max(col-len(id), 0) + st := status(ctx, e.Status) + stPad := max(10-len(e.Status), 0) + as := e.autoStopLabel() + asPad := max(autostopCol-len(as), 0) + idStr := cmdio.Bold(ctx, id) + if strings.EqualFold(e.Status, "running") { + idStr = cmdio.Bold(ctx, cmdio.Cyan(ctx, id)) + } + fmt.Fprintf(out, " %s%s %s%s %s%s %s\n", + idStr, strings.Repeat(" ", idPad), + st, strings.Repeat(" ", stPad), + cmdio.Dim(ctx, as), strings.Repeat(" ", asPad), + def) + } + blank(out) + return nil + }, + } + + cmd.Flags().BoolVar(&outputJSON, "json", false, "Output as JSON") + + return cmd +} diff --git a/cmd/lakebox/register.go b/cmd/lakebox/register.go new file mode 100644 index 0000000000..c3e34e4ea3 --- /dev/null +++ b/cmd/lakebox/register.go @@ -0,0 +1,123 @@ +package lakebox + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/env" + "github.com/spf13/cobra" +) + +const lakeboxKeyName = "lakebox_rsa" + +func newRegisterCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "register", + Short: "Register this machine for lakebox SSH access", + Long: `Generate a dedicated SSH key for lakebox and register it with the service. + +This command: +1. Generates an RSA SSH key at ~/.ssh/lakebox_rsa (if it doesn't exist) +2. Registers the public key with the lakebox service + +After registration, 'databricks lakebox ssh' will use this key automatically. +Run this once per machine. 
+ +Example: + databricks lakebox register`, + PreRunE: root.MustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + api, err := newLakeboxAPI(w) + if err != nil { + return err + } + + keyPath, generated, err := ensureLakeboxKey(ctx) + if err != nil { + return fmt.Errorf("failed to ensure lakebox SSH key: %w", err) + } + + stderr := cmd.ErrOrStderr() + if generated { + ok(ctx, "Generated SSH key at "+cmdio.Dim(ctx, keyPath)) + } else { + field(ctx, stderr, "key", keyPath) + } + + pubKeyData, err := os.ReadFile(keyPath + ".pub") + if err != nil { + return fmt.Errorf("failed to read public key %s.pub: %w", keyPath, err) + } + + s := spin(ctx, "Registering key…") + defer s.Close() + if err := api.registerKey(ctx, string(pubKeyData)); err != nil { + s.fail("Failed to register key") + return fmt.Errorf("failed to register key: %w", err) + } + s.ok("SSH key registered") + + blank(stderr) + fmt.Fprintf(stderr, " Run %s to connect.\n\n", cmdio.Bold(ctx, "databricks lakebox ssh")) + return nil + }, + } + + return cmd +} + +// lakeboxKeyPath returns the path to the dedicated lakebox SSH key. +func lakeboxKeyPath(ctx context.Context) (string, error) { + homeDir, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".ssh", lakeboxKeyName), nil +} + +// ensureLakeboxKey returns the path to the lakebox SSH key, generating it if +// it doesn't exist. Returns (path, wasGenerated, error). +func ensureLakeboxKey(ctx context.Context) (string, bool, error) { + keyPath, err := lakeboxKeyPath(ctx) + if err != nil { + return "", false, err + } + + if _, err := os.Stat(keyPath); err == nil { + return keyPath, false, nil + } + + // Check that ssh-keygen is available before trying to generate. 
+ if _, err := exec.LookPath("ssh-keygen"); err != nil { + return "", false, errors.New( + "ssh-keygen not found in PATH.\n" + + "Please install OpenSSH and run 'databricks lakebox register' again.\n" + + " macOS: brew install openssh\n" + + " Ubuntu: sudo apt install openssh-client\n" + + " Windows: install Git for Windows (includes ssh-keygen)") + } + + sshDir := filepath.Dir(keyPath) + if err := os.MkdirAll(sshDir, 0o700); err != nil { + return "", false, fmt.Errorf("failed to create %s: %w", sshDir, err) + } + + genCmd := exec.Command("ssh-keygen", "-t", "rsa", "-b", "4096", "-f", keyPath, "-N", "", "-q", "-C", "lakebox") + genCmd.Stdin = os.Stdin + genCmd.Stdout = os.Stderr + genCmd.Stderr = os.Stderr + if err := genCmd.Run(); err != nil { + return "", false, fmt.Errorf("ssh-keygen failed: %w", err) + } + + return keyPath, true, nil +} diff --git a/cmd/lakebox/ssh.go b/cmd/lakebox/ssh.go new file mode 100644 index 0000000000..9ba1b4957a --- /dev/null +++ b/cmd/lakebox/ssh.go @@ -0,0 +1,172 @@ +package lakebox + +import ( + "errors" + "fmt" + "io/fs" + "os" + "strings" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/execv" + "github.com/spf13/cobra" +) + +const ( + defaultGatewayHost = "uw2.dbrx.dev" + stagingDefaultGatewayHost = "uw2.s.dbrx.dev" + defaultGatewayPort = "2222" +) + +// resolveGatewayHost picks the SSH gateway hostname based on the workspace host. +// Staging workspaces (*.staging.cloud.databricks.com etc.) route through +// uw2.s.dbrx.dev; everything else uses prod uw2.dbrx.dev. 
+func resolveGatewayHost(workspaceHost string) string { + if strings.Contains(workspaceHost, ".staging.") { + return stagingDefaultGatewayHost + } + return defaultGatewayHost +} + +func newSSHCommand() *cobra.Command { + var gatewayHost string + var gatewayPort string + + cmd := &cobra.Command{ + Use: "ssh [lakebox-id] [-- ...]", + Short: "SSH into a Lakebox environment", + Long: `SSH into a Lakebox environment. + +Connect to your default or a named lakebox via SSH. Extra arguments +after -- are passed directly to the ssh process. This lets you run +remote commands, set up port forwarding, or pass any other ssh flags. + +Examples: + databricks lakebox ssh # interactive shell on default lakebox + databricks lakebox ssh happy-panda-1234 # interactive shell on specific lakebox + databricks lakebox ssh -- ls -la /home # run command on default lakebox + databricks lakebox ssh happy-panda-1234 -- cat /etc/os-release # run command on specific lakebox + databricks lakebox ssh -- -L 8080:localhost:8080 # port forwarding on default lakebox`, + Args: cobra.ArbitraryArgs, + PreRunE: root.MustWorkspaceClient, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + profile := w.Config.Profile + if profile == "" { + profile = w.Config.Host + } + + // Use the dedicated lakebox SSH key. + keyPath, err := lakeboxKeyPath(ctx) + if err != nil { + return fmt.Errorf("failed to determine lakebox key path: %w", err) + } + if _, err := os.Stat(keyPath); errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("lakebox SSH key not found at %s — run 'databricks lakebox register' first", keyPath) + } + + // Parse args: everything before -- is the optional lakebox ID, + // everything after -- is passed through to ssh. 
+ var lakeboxID string + var extraArgs []string + + switch dashAt := cmd.ArgsLenAtDash(); dashAt { + case -1: + if len(args) > 0 { + lakeboxID = args[0] + } + case 0: + extraArgs = args[dashAt:] + default: + lakeboxID = args[0] + extraArgs = args[dashAt:] + } + + // Determine lakebox ID if not explicit. + if lakeboxID == "" { + api, err := newLakeboxAPI(w) + if err != nil { + return err + } + + // If we have a saved default, confirm it still exists on the + // server. The lakebox may have been auto-stopped, deleted from + // another machine, or reaped by an admin since we wrote the + // state file. Clear the stale entry and fall through to + // provisioning a fresh one. + if def := getDefault(ctx, profile); def != "" { + if _, err := api.get(ctx, def); err == nil { + lakeboxID = def + } else { + warn(ctx, fmt.Sprintf("Saved default %s is gone; provisioning a new lakebox", def)) + _ = clearDefault(ctx, profile) + } + } + + if lakeboxID == "" { + pubKeyData, err := os.ReadFile(keyPath + ".pub") + if err != nil { + return fmt.Errorf("failed to read public key %s.pub: %w", keyPath, err) + } + + s := spin(ctx, "Provisioning your lakebox…") + defer s.Close() + result, err := api.create(ctx, string(pubKeyData)) + if err != nil { + s.fail("Failed to create lakebox") + return fmt.Errorf("failed to create lakebox: %w", err) + } + lakeboxID = result.SandboxID + s.ok("Lakebox " + cmdio.Bold(ctx, lakeboxID) + " ready") + + if err := setDefault(ctx, profile, lakeboxID); err != nil { + warn(ctx, fmt.Sprintf("Could not save default: %v", err)) + } + } + } + + host := gatewayHost + if host == "" { + host = resolveGatewayHost(w.Config.Host) + } + + s := spin(ctx, "Connecting to "+cmdio.Bold(ctx, lakeboxID)+"…") + defer s.Close() + s.ok("Connected to " + cmdio.Bold(ctx, lakeboxID)) + return execSSHDirect(lakeboxID, host, gatewayPort, keyPath, extraArgs) + }, + } + + cmd.Flags().StringVar(&gatewayHost, "gateway", "", "Lakebox gateway hostname (auto-detected from profile if empty)") + 
cmd.Flags().StringVar(&gatewayPort, "port", defaultGatewayPort, "Lakebox gateway SSH port")
+
+	return cmd
+}
+
+// execSSHDirect replaces the CLI process with ssh (or simulates that on
+// Windows via execv). All options are passed on the command line, so no
+// ~/.ssh/config entry is required. Extra args are appended after the
+// destination for remote commands or ssh flags.
+func execSSHDirect(lakeboxID, host, port, keyPath string, extraArgs []string) error {
+	// StrictHostKeyChecking=no + UserKnownHostsFile=/dev/null: the gateway's
+	// host key is not pinned, so don't prompt and don't pollute known_hosts.
+	// NOTE(review): this accepts any host key — confirm that is acceptable
+	// for the prod gateway, or consider pinning its key.
+	args := []string{
+		"ssh",
+		"-i", keyPath,
+		"-p", port,
+		"-o", "IdentitiesOnly=yes",
+		"-o", "PreferredAuthentications=publickey",
+		"-o", "StrictHostKeyChecking=no",
+		"-o", "UserKnownHostsFile=/dev/null",
+		"-o", "LogLevel=ERROR",
+		fmt.Sprintf("%s@%s", lakeboxID, host),
+	}
+	args = append(args, extraArgs...)
+
+	return execv.Execv(execv.Options{
+		Args: args,
+		Env:  os.Environ(),
+	})
+}
diff --git a/cmd/lakebox/state.go b/cmd/lakebox/state.go
new file mode 100644
index 0000000000..5be3da1d4a
--- /dev/null
+++ b/cmd/lakebox/state.go
@@ -0,0 +1,101 @@
+package lakebox
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/databricks/cli/libs/env"
+)
+
+// stateFile stores per-profile lakebox defaults on the local filesystem.
+// Located at ~/.databricks/lakebox.json.
+type stateFile struct {
+	// Profile name → default lakebox ID.
+ Defaults map[string]string `json:"defaults"` +} + +func stateFilePath(ctx context.Context) (string, error) { + home, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } + return filepath.Join(home, ".databricks", "lakebox.json"), nil +} + +func loadState(ctx context.Context) (*stateFile, error) { + path, err := stateFilePath(ctx) + if err != nil { + return nil, err + } + + data, err := os.ReadFile(path) + if errors.Is(err, fs.ErrNotExist) { + return &stateFile{Defaults: make(map[string]string)}, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", path, err) + } + + var state stateFile + if err := json.Unmarshal(data, &state); err != nil { + return nil, fmt.Errorf("failed to parse %s: %w", path, err) + } + if state.Defaults == nil { + state.Defaults = make(map[string]string) + } + return &state, nil +} + +func saveState(ctx context.Context, state *stateFile) error { + path, err := stateFilePath(ctx) + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return err + } + + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + return os.WriteFile(path, data, 0o600) +} + +func getDefault(ctx context.Context, profile string) string { + state, err := loadState(ctx) + if err != nil { + return "" + } + return state.Defaults[profile] +} + +func setDefault(ctx context.Context, profile, lakeboxID string) error { + state, err := loadState(ctx) + if err != nil { + return err + } + if state.Defaults[profile] == lakeboxID { + return nil + } + state.Defaults[profile] = lakeboxID + return saveState(ctx, state) +} + +func clearDefault(ctx context.Context, profile string) error { + state, err := loadState(ctx) + if err != nil { + return err + } + if _, ok := state.Defaults[profile]; !ok { + return nil + } + delete(state.Defaults, profile) + return saveState(ctx, state) +} diff --git a/cmd/lakebox/state_test.go b/cmd/lakebox/state_test.go new file mode 100644 index 
0000000000..a7488de2e1
--- /dev/null
+++ b/cmd/lakebox/state_test.go
@@ -0,0 +1,171 @@
+package lakebox
+
+import (
+	"context"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	"github.com/databricks/cli/libs/env"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// stateCtx returns a context whose $HOME is a temp directory, so state file
+// operations are isolated from the developer's real ~/.databricks/lakebox.json.
+// The second return is the path the state file would occupy under that home.
+func stateCtx(t *testing.T) (context.Context, string) {
+	t.Helper()
+	home := t.TempDir()
+	ctx := env.WithUserHomeDir(t.Context(), home)
+	return ctx, filepath.Join(home, ".databricks", "lakebox.json")
+}
+
+// A fresh home dir must load as empty state, not an error.
+func TestStateLoadMissingFileReturnsEmpty(t *testing.T) {
+	ctx, _ := stateCtx(t)
+	state, err := loadState(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, &stateFile{Defaults: map[string]string{}}, state)
+}
+
+// Looking up an unknown profile yields "" rather than an error.
+func TestStateGetDefaultMissingProfileReturnsEmpty(t *testing.T) {
+	ctx, _ := stateCtx(t)
+	assert.Equal(t, "", getDefault(ctx, "any-profile"))
+}
+
+func TestStateSetGetDefaultRoundTrip(t *testing.T) {
+	ctx, _ := stateCtx(t)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	assert.Equal(t, "lakebox-a", getDefault(ctx, "profile-a"))
+	assert.Equal(t, "", getDefault(ctx, "profile-b"))
+}
+
+func TestStateMultipleProfilesIndependent(t *testing.T) {
+	ctx, _ := stateCtx(t)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	require.NoError(t, setDefault(ctx, "profile-b", "lakebox-b"))
+
+	assert.Equal(t, "lakebox-a", getDefault(ctx, "profile-a"))
+	assert.Equal(t, "lakebox-b", getDefault(ctx, "profile-b"))
+}
+
+func TestStateSetDefaultOverwrites(t *testing.T) {
+	ctx, _ := stateCtx(t)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a-prime"))
+	assert.Equal(t, "lakebox-a-prime", getDefault(ctx, "profile-a"))
+}
+
+// Clearing one profile must not disturb another profile's default.
+func TestStateClearDefault(t *testing.T) {
+	ctx, _ := 
stateCtx(t)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	require.NoError(t, setDefault(ctx, "profile-b", "lakebox-b"))
+
+	require.NoError(t, clearDefault(ctx, "profile-a"))
+	assert.Equal(t, "", getDefault(ctx, "profile-a"))
+	assert.Equal(t, "lakebox-b", getDefault(ctx, "profile-b"))
+}
+
+func TestStateClearDefaultMissingProfileDoesNotCreateFile(t *testing.T) {
+	ctx, path := stateCtx(t)
+
+	require.NoError(t, clearDefault(ctx, "no-such-profile"))
+
+	_, err := os.Stat(path)
+	assert.ErrorIs(t, err, fs.ErrNotExist, "clearDefault must not create the state file when there's nothing to remove")
+}
+
+func TestStateSetDefaultSameValueDoesNotRewriteFile(t *testing.T) {
+	ctx, path := stateCtx(t)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	before, err := os.Stat(path)
+	require.NoError(t, err)
+
+	// Re-set with the same value should be a no-op.
+	// NOTE(review): mtime equality can false-pass on filesystems with coarse
+	// timestamp resolution — it proves "probably not rewritten", not "never".
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	after, err := os.Stat(path)
+	require.NoError(t, err)
+	assert.Equal(t, before.ModTime(), after.ModTime(), "no-op setDefault must not rewrite the file")
+}
+
+func TestStateSetDefaultMissingNoFileBeforeWrite(t *testing.T) {
+	ctx, path := stateCtx(t)
+
+	// Loading state on a fresh tempdir must not create the file.
+	assert.Equal(t, "", getDefault(ctx, "profile-a"))
+	_, err := os.Stat(path)
+	assert.ErrorIs(t, err, fs.ErrNotExist, "getDefault must not create the state file")
+}
+
+// Pre-existing files from earlier CLI versions carry a `last_profile` field
+// the current schema doesn't know about. loadState must accept the file
+// (silently dropping the unknown field) and saveState must rewrite without
+// it, so the field naturally falls off on the next mutation.
+func TestStateLoadIgnoresUnknownFields(t *testing.T) {
+	ctx, path := stateCtx(t)
+	require.NoError(t, os.MkdirAll(filepath.Dir(path), 0o700))
+	require.NoError(t, os.WriteFile(path, []byte(`{
+		"defaults": {"profile-a": "lakebox-a"},
+		"last_profile": "profile-a"
+	}`), 0o600))
+
+	assert.Equal(t, "lakebox-a", getDefault(ctx, "profile-a"))
+
+	// A mutation rewrites the file through the current schema only, dropping
+	// the legacy field.
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a-prime"))
+	rewritten, err := os.ReadFile(path)
+	require.NoError(t, err)
+	assert.NotContains(t, string(rewritten), "last_profile")
+}
+
+// Malformed JSON must surface as a parse error, not empty state — silently
+// resetting would lose the user's saved defaults.
+func TestStateLoadReturnsErrorOnCorruptJSON(t *testing.T) {
+	ctx, path := stateCtx(t)
+	require.NoError(t, os.MkdirAll(filepath.Dir(path), 0o700))
+	require.NoError(t, os.WriteFile(path, []byte("{not valid json"), 0o600))
+
+	_, err := loadState(ctx)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "failed to parse")
+}
+
+// Files written by saveState must round-trip through loadState even if the
+// caller starts from an empty Defaults map.
+func TestStateSaveCreatesParentDirs(t *testing.T) {
+	ctx, path := stateCtx(t)
+
+	// Confirm parent dir does not exist yet.
+	_, err := os.Stat(filepath.Dir(path))
+	assert.ErrorIs(t, err, fs.ErrNotExist)
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+
+	// File and parent dir now exist with sensible perms.
+	info, err := os.Stat(path)
+	require.NoError(t, err)
+
+	dirInfo, err := os.Stat(filepath.Dir(path))
+	require.NoError(t, err)
+
+	// Windows does not honor Unix permission bits; os.Stat reports 0o666/0o777
+	// regardless of what was passed to OpenFile/MkdirAll.
+	if runtime.GOOS != "windows" {
+		assert.Equal(t, os.FileMode(0o600), info.Mode().Perm())
+		assert.Equal(t, os.FileMode(0o700), dirInfo.Mode().Perm())
+	}
+}
+
+// Defaults of nil on disk (legal but not what saveState produces) must still
+// load to a usable empty map so callers can setDefault without nil-deref.
+func TestStateLoadNilDefaultsMap(t *testing.T) {
+	ctx, path := stateCtx(t)
+	require.NoError(t, os.MkdirAll(filepath.Dir(path), 0o700))
+	require.NoError(t, os.WriteFile(path, []byte(`{}`), 0o600))
+
+	require.NoError(t, setDefault(ctx, "profile-a", "lakebox-a"))
+	assert.Equal(t, "lakebox-a", getDefault(ctx, "profile-a"))
+}
diff --git a/cmd/lakebox/status.go b/cmd/lakebox/status.go
new file mode 100644
index 0000000000..ee9c276aa8
--- /dev/null
+++ b/cmd/lakebox/status.go
@@ -0,0 +1,63 @@
+package lakebox
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdctx"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/spf13/cobra"
+)
+
+// newStatusCommand builds `lakebox status`: fetch one sandbox and print it
+// either as indented JSON (--json) or as a short human-readable field list.
+func newStatusCommand() *cobra.Command {
+	var outputJSON bool
+
+	cmd := &cobra.Command{
+		// NOTE(review): the Use string ends after "status " — it looks like it
+		// lost a "<lakebox-id>" placeholder; confirm against the original.
+		Use:   "status ",
+		Short: "Show Lakebox environment status",
+		Long: `Show detailed status of a Lakebox environment.
+
+Example:
+  lakebox status happy-panda-1234
+  lakebox status happy-panda-1234 --json`,
+		Args:    cobra.ExactArgs(1),
+		PreRunE: root.MustWorkspaceClient,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			w := cmdctx.WorkspaceClient(ctx)
+			api, err := newLakeboxAPI(w)
+			if err != nil {
+				return err
+			}
+
+			lakeboxID := args[0]
+
+			entry, err := api.get(ctx, lakeboxID)
+			if err != nil {
+				return fmt.Errorf("failed to get lakebox %s: %w", lakeboxID, err)
+			}
+
+			// --json: emit the raw entry for scripting.
+			if outputJSON {
+				enc := json.NewEncoder(cmd.OutOrStdout())
+				enc.SetIndent("", " ")
+				return enc.Encode(entry)
+			}
+
+			out := cmd.OutOrStdout()
+			blank(out)
+			field(ctx, out, "id", cmdio.Bold(ctx, entry.SandboxID))
+			field(ctx, out, "status", status(ctx, entry.Status))
+			// Hide the fqdn row when the server returned none.
+			if entry.FQDN != "" {
+				field(ctx, out, "fqdn", cmdio.Dim(ctx, entry.FQDN))
+			}
+			field(ctx, out, "autostop", cmdio.Dim(ctx, entry.autoStopLabel()))
+			blank(out)
+			return nil
+		},
+	}
+
+	cmd.Flags().BoolVar(&outputJSON, "json", false, "Output as JSON")
+
+	return cmd
+}
diff --git a/cmd/lakebox/ui.go 
b/cmd/lakebox/ui.go
new file mode 100644
index 0000000000..a2904c7fe2
--- /dev/null
+++ b/cmd/lakebox/ui.go
@@ -0,0 +1,77 @@
+package lakebox
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/databricks/cli/libs/cmdio"
+)
+
+// cmdioSpinner is the subset of *cmdio.spinner's method set we need.
+// Defining the interface locally lets us hold the unexported type as a
+// struct field; cmdio's spinner satisfies it structurally.
+type cmdioSpinner interface {
+	Update(msg string)
+	Close()
+}
+
+// spinner wraps cmdio.NewSpinner with ok/fail markers. ok and fail close the
+// underlying spinner and log a final ✓/✗ line; Close stops the spinner
+// without printing. cmdio's Close is itself idempotent, so a `defer s.Close()`
+// is safe alongside an ok/fail call on the success path.
+type spinner struct {
+	cmdioSpinner
+	ctx context.Context
+}
+
+// spin starts a spinner already showing msg.
+func spin(ctx context.Context, msg string) *spinner {
+	sp := cmdio.NewSpinner(ctx)
+	sp.Update(msg)
+	return &spinner{cmdioSpinner: sp, ctx: ctx}
+}
+
+// ok/fail stop the spinner and print a final marker line via mark.
+func (s *spinner) ok(msg string)   { s.mark("✓", msg) }
+func (s *spinner) fail(msg string) { s.mark("✗", msg) }
+
+// mark closes the spinner, then logs the marker and message through cmdio.
+func (s *spinner) mark(mark, msg string) {
+	s.Close()
+	cmdio.LogString(s.ctx, " "+cmdio.Cyan(s.ctx, mark)+" "+msg)
+}
+
+// status formats a lakebox lifecycle status with a color hint.
+func status(ctx context.Context, s string) string {
+	switch strings.ToLower(s) {
+	case "running":
+		return cmdio.Cyan(ctx, "running")
+	case "stopped":
+		return cmdio.Dim(ctx, "stopped")
+	case "creating":
+		return cmdio.Bold(ctx, cmdio.Cyan(ctx, "creating…"))
+	default:
+		// Unknown states are shown dimmed and lowercased rather than dropped.
+		return cmdio.Dim(ctx, strings.ToLower(s))
+	}
+}
+
+// field prints " label value" to w, where label is dimmed and padded to a
+// fixed visible width. Padding has to happen before Dim so the SGR escapes
+// don't inflate the byte count and break column alignment.
+func field(ctx context.Context, w io.Writer, label, value string) {
+	fmt.Fprintf(w, " %s %s\n", cmdio.Dim(ctx, fmt.Sprintf("%-10s", label)), value)
+}
+
+// ok prints " ✓ message" to stderr via the cmdio context.
+func ok(ctx context.Context, msg string) {
+	cmdio.LogString(ctx, " "+cmdio.Cyan(ctx, "✓")+" "+msg)
+}
+
+// warn prints " ! message" to stderr via the cmdio context.
+// NOTE(review): warnings reuse the Cyan accent rather than Yellow — confirm
+// the single-accent palette is intentional.
+func warn(ctx context.Context, msg string) {
+	cmdio.LogString(ctx, " "+cmdio.Cyan(ctx, "!")+" "+msg)
+}
+
+// blank prints an empty line to w.
+func blank(w io.Writer) {
+	fmt.Fprintln(w)
+}
diff --git a/libs/cmdio/color.go b/libs/cmdio/color.go
index 4066b30f75..a2a7ce24e2 100644
--- a/libs/cmdio/color.go
+++ b/libs/cmdio/color.go
@@ -11,6 +11,7 @@ import (
 const (
 	ansiReset  = "\x1b[0m"
 	ansiBold   = "\x1b[1m"
+	ansiDim    = "\x1b[2m"
 	ansiItalic = "\x1b[3m"
 	ansiRed    = "\x1b[31m"
 	ansiGreen  = "\x1b[32m"
@@ -42,6 +43,12 @@ func render(ctx context.Context, code, msg string) string {
 	return code + msg + ansiReset
 }
 
+// Bold renders msg in bold.
+func Bold(ctx context.Context, msg string) string { return render(ctx, ansiBold, msg) }
+
+// Dim renders msg in dim (faint) intensity.
+func Dim(ctx context.Context, msg string) string { return render(ctx, ansiDim, msg) }
+
 // Red renders msg in red.
 func Red(ctx context.Context, msg string) string { return render(ctx, ansiRed, msg) }
 
diff --git a/libs/cmdio/color_test.go b/libs/cmdio/color_test.go
index 54df185982..dcc45f9c94 100644
--- a/libs/cmdio/color_test.go
+++ b/libs/cmdio/color_test.go
@@ -27,6 +27,8 @@ func TestColorHelpersEmitSGRWhenEnabled(t *testing.T) {
 		got  string
 		want string
 	}{
+		{"Bold", cmdio.Bold(ctx, "id"), "\x1b[1mid\x1b[0m"},
+		{"Dim", cmdio.Dim(ctx, "hint"), "\x1b[2mhint\x1b[0m"},
 		{"Red", cmdio.Red(ctx, "hello"), "\x1b[31mhello\x1b[0m"},
 		{"Green", cmdio.Green(ctx, "ok"), "\x1b[32mok\x1b[0m"},
 		{"Yellow", cmdio.Yellow(ctx, "warn"), "\x1b[33mwarn\x1b[0m"},