From bb85756ce7b79bdfc7ba87bce77e66ce24550301 Mon Sep 17 00:00:00 2001 From: Paul Thurlow Date: Thu, 16 Apr 2026 19:08:10 -0700 Subject: [PATCH] rename sandbox and queries commands --- README.md | 54 ++-- skills/hotdata/SKILL.md | 77 +++--- .../hotdata/references/DATA_MODEL.template.md | 8 - skills/hotdata/references/WORKFLOWS.md | 56 +--- src/api.rs | 74 +---- src/command.rs | 119 ++------- src/config.rs | 12 +- src/main.rs | 69 +++-- src/queries.rs | 252 ++++++++---------- src/{sessions.rs => sandbox.rs} | 118 ++++---- src/workspace.rs | 2 +- tests/{session_env.rs => sandbox_env.rs} | 34 +-- 12 files changed, 328 insertions(+), 547 deletions(-) rename src/{sessions.rs => sandbox.rs} (68%) rename tests/{session_env.rs => sandbox_env.rs} (66%) diff --git a/README.md b/README.md index b43062f..65dfbcd 100644 --- a/README.md +++ b/README.md @@ -66,12 +66,12 @@ API key priority (lowest to highest): config file → `HOTDATA_API_KEY` env var | `tables` | `list` | List tables and columns | | `datasets` | `list`, `create` | Manage uploaded datasets | | `query` | | Execute a SQL query | -| `queries` | `list`, `create`, `update`, `run` | Manage saved queries | +| `queries` | `list` | Inspect query run history | | `search` | | Full-text search across a table column | | `indexes` | `list`, `create` | Manage indexes on a table | | `results` | `list` | Retrieve stored query results | | `jobs` | `list` | Manage background jobs | -| `sessions` | `list`, `new`, `set`, `read`, `update`, `run` | Manage work sessions | +| `sandbox` | `list`, `new`, `set`, `read`, `update`, `run` | Manage sandboxes | | `skills` | `install`, `status` | Manage the hotdata agent skill | ## Global options @@ -160,21 +160,17 @@ hotdata query status [-o table|json|csv] - Use `hotdata query status ` to poll for results. - Exit codes for `query status`: `0` = succeeded, `1` = failed, `2` = still running (poll again). 
-## Saved Queries +## Query Run History ```sh -hotdata queries list [--limit ] [--offset ] [--format table|json|yaml] -hotdata queries [--format table|json|yaml] -hotdata queries create --name "My Query" --sql "SELECT ..." [--description "..."] [--tags "tag1,tag2"] -hotdata queries update [--name "New Name"] [--sql "SELECT ..."] [--description "..."] [--tags "tag1,tag2"] -hotdata queries run [--format table|json|csv] +hotdata queries list [--limit ] [--cursor ] [--status ] [-o table|json|yaml] +hotdata queries [-o table|json|yaml] ``` -- `list` shows saved queries with name, description, tags, and version. -- View a query by ID to see its formatted and syntax-highlighted SQL. -- `create` requires `--name` and `--sql`. Tags are comma-separated. -- `update` accepts any combination of fields to change. -- `run` executes a saved query and displays results like the `query` command. +- `list` shows past query executions with status, creation time, duration, row count, and a truncated SQL preview (default limit 20). +- `--status` filters by run status (comma-separated, e.g. `--status running,failed`). +- View a run by ID to see full metadata (timings, `result_id`, snapshot, hashes) and the formatted, syntax-highlighted SQL. +- If a run has a `result_id`, fetch its rows with `hotdata results `. ## Search @@ -226,27 +222,27 @@ hotdata jobs [--workspace-id ] [--format table|json|yaml] - `--job-type` accepts: `data_refresh_table`, `data_refresh_connection`, `create_index`. - `--status` accepts: `pending`, `running`, `succeeded`, `partially_succeeded`, `failed`. -## Sessions +## Sandboxes -Sessions group related CLI activity (queries, dataset operations, etc.) under a single context. +Sandboxes group related CLI activity (queries, dataset operations, etc.) under a single context. 
```sh
-hotdata sessions list [-w <workspace-id>] [-o table|json|yaml]
-hotdata sessions <session-id> [-w <workspace-id>] [-o table|json|yaml]
-hotdata sessions new [--name "My Session"] [-o table|json|yaml]
-hotdata sessions set [<session-id>]
-hotdata sessions read
-hotdata sessions update [<session-id>] [--name "New Name"] [--markdown "..."] [-o table|json|yaml]
-hotdata sessions run <cmd> [args...]
-hotdata sessions <session-id> run <cmd> [args...]
+hotdata sandbox list [-w <workspace-id>] [-o table|json|yaml]
+hotdata sandbox <sandbox-id> [-w <workspace-id>] [-o table|json|yaml]
+hotdata sandbox new [--name "My Sandbox"] [-o table|json|yaml]
+hotdata sandbox set [<sandbox-id>]
+hotdata sandbox read
+hotdata sandbox update [<sandbox-id>] [--name "New Name"] [--markdown "..."] [-o table|json|yaml]
+hotdata sandbox run <cmd> [args...]
+hotdata sandbox <sandbox-id> run <cmd> [args...]
 ```
 
-- `list` shows all sessions with a `*` marker on the active one.
-- `new` creates a session and sets it as active.
-- `set` switches the active session. Omit the ID to clear the active session.
-- `read` prints the markdown content of the current session.
-- `update` modifies the name or markdown of a session (defaults to the active session).
-- `run` runs a command with the hotdata CLI sandboxed in a session. Creates a new session unless a session ID is provided before `run`. Useful for launching an agent that can only access session data. Nesting sessions is not allowed.
+- `list` shows all sandboxes with a `*` marker on the active one.
+- `new` creates a sandbox and sets it as active.
+- `set` switches the active sandbox. Omit the ID to clear the active sandbox.
+- `read` prints the markdown content of the current sandbox.
+- `update` modifies the name or markdown of a sandbox (defaults to the active sandbox).
+- `run` runs a command with the hotdata CLI scoped to a sandbox. Creates a new sandbox unless a sandbox ID is provided before `run`. Useful for launching an agent that can only access sandbox data. Nesting sandboxes is not allowed.
 
## Configuration diff --git a/skills/hotdata/SKILL.md b/skills/hotdata/SKILL.md index de073dc..4d9ce8f 100644 --- a/skills/hotdata/SKILL.md +++ b/skills/hotdata/SKILL.md @@ -1,6 +1,6 @@ --- name: hotdata -description: Use this skill when the user wants to run hotdata CLI commands, query the Hotdata API, list workspaces, list connections, create connections, list tables, manage datasets, execute SQL queries, manage saved queries, search tables, manage indexes, manage sessions, or interact with the hotdata service. Activate when the user says "run hotdata", "query hotdata", "list workspaces", "list connections", "create a connection", "list tables", "list datasets", "create a dataset", "upload a dataset", "execute a query", "search a table", "list indexes", "create an index", "list saved queries", "run a saved query", "list sessions", "create a session", "run a session", or asks you to use the hotdata CLI. +description: Use this skill when the user wants to run hotdata CLI commands, query the Hotdata API, list workspaces, list connections, create connections, list tables, manage datasets, execute SQL queries, inspect query run history, search tables, manage indexes, manage sandboxes, or interact with the hotdata service. Activate when the user says "run hotdata", "query hotdata", "list workspaces", "list connections", "create a connection", "list tables", "list datasets", "create a dataset", "upload a dataset", "execute a query", "search a table", "list indexes", "create an index", "list query runs", "list past queries", "query history", "list sandboxes", "create a sandbox", "run a sandbox", or asks you to use the hotdata CLI. version: 0.1.9 --- @@ -29,13 +29,12 @@ API URL defaults to `https://api.hotdata.dev/v1` or overridden via `HOTDATA_API_ All commands that accept `--workspace-id` are optional. If omitted, the active workspace is used. 
Use `hotdata workspaces set` to switch the active workspace interactively, or pass a workspace ID directly: `hotdata workspaces set `. The active workspace is shown with a `*` marker in `hotdata workspaces list`. **Omit `--workspace-id` unless you need to target a specific workspace.** -## Multi-step workflows (Model, Library, History, Chain, Indexes) +## Multi-step workflows (Model, History, Chain, Indexes) These are **patterns** built from the commands below—not separate CLI subcommands: - **Model** — Markdown semantic map of your workspace (entities, keys, joins). Refresh using `connections`, `connections refresh`, `tables list`, and `datasets list`. For a **deep** modeling pass (connector enrichment, indexes, per-table detail), see [references/MODEL_BUILD.md](references/MODEL_BUILD.md). -- **Library** — Curated **`hotdata queries`** entries for repeatable SQL (`queries create`, `queries run`, …). -- **History** — Find prior **`hotdata results`** and saved queries (`results list`, `results `, `queries list`). +- **History** — Inspect prior activity via `hotdata queries list` (query runs) and `hotdata results list` / `results ` (row data). - **Chain** — Follow-ups via **`datasets create`** then `query` against `datasets.main.`. - **Indexes** — Review SQL and schema, compare to existing indexes, create **sorted**, **bm25**, or **vector** indexes when it clearly helps; see [references/WORKFLOWS.md](references/WORKFLOWS.md#indexes). @@ -215,21 +214,15 @@ hotdata results [-w ] [-o table|json|csv] - Query output also includes a `result-id` in the footer (e.g. `[result-id: rslt...]`). - **Always use `results list` / `results ` to retrieve past query results rather than re-running the same query.** Re-running queries wastes resources and may return different results. 
-### Saved Queries +### Query Run History ``` -hotdata queries list [--limit ] [--offset ] [--format table|json|yaml] -hotdata queries [--format table|json|yaml] -hotdata queries create --name "My Query" --sql "SELECT ..." [--description "..."] [--tags "tag1,tag2"] [--format table|json|yaml] -hotdata queries update [--name "New Name"] [--sql "SELECT ..."] [--description "..."] [--tags "tag1,tag2"] [--category "..."] [--table-size "..."] [--format table|json|yaml] -hotdata queries run [--format table|json|csv] +hotdata queries list [--limit ] [--cursor ] [--status ] [-o table|json|yaml] +hotdata queries [-o table|json|yaml] ``` -- `list` shows saved queries with name, description, tags, and version. -- View a query by ID to see its formatted and syntax-highlighted SQL. -- `create` requires `--name` and `--sql`. Tags are comma-separated. -- `update` accepts any combination of `--name`, `--sql`, `--description`, and `--tags` to change those fields. -- `update` also supports `--category` and `--table-size` for metadata; pass an **empty string** for either flag to clear its value. -- `run` executes a saved query and displays results like the `query` command. -- **Use `queries run` instead of re-typing SQL when a saved query exists.** +- `list` shows query runs with status, creation time, duration, row count, and a truncated SQL preview (default limit 20). +- `--status` filters by run status (comma-separated, e.g. `--status running,failed`). +- View a run by ID to see full metadata (timings, `result_id`, snapshot, hashes) and the formatted, syntax-highlighted SQL. +- If a run has a `result_id`, fetch its rows with `hotdata results `. ### Search ``` @@ -276,38 +269,38 @@ hotdata auth status # Check current auth status hotdata auth logout # Remove saved auth for the default profile ``` -### Sessions +### Sandboxes -Sessions are for **ad-hoc, exploratory work** that does not need to be long-lived. They group related CLI activity (queries, dataset operations, etc.) 
under a single context so it can be tracked and cleaned up together. **Datasets created inside a session are tied to that session and will be removed when the session ends.** If you need data to persist beyond the session, create datasets outside of a session context. +Sandboxes are for **ad-hoc, exploratory work** that does not need to be long-lived. They group related CLI activity (queries, dataset operations, etc.) under a single context so it can be tracked and cleaned up together. **Datasets created inside a sandbox are tied to that sandbox and will be removed when the sandbox ends.** If you need data to persist beyond the sandbox, create datasets outside of a sandbox context. -> **IMPORTANT: If `HOTDATA_SESSION` is set in the environment, you are inside an active session. NEVER attempt to unset, override, or work around this variable. Do not clear it, do not start a new session, do not run `sessions run` or `sessions new` or `sessions set`. All your work should be attributed to the current session. Attempting to nest or escape a session will fail with an error.** +> **IMPORTANT: If `HOTDATA_SANDBOX` is set in the environment, you are inside an active sandbox. NEVER attempt to unset, override, or work around this variable. Do not clear it, do not start a new sandbox, do not run `sandbox run` or `sandbox new` or `sandbox set`. All your work should be attributed to the current sandbox. Attempting to nest or escape a sandbox will fail with an error.** ``` -hotdata sessions list [-w ] [-o table|json|yaml] -hotdata sessions [-w ] [-o table|json|yaml] -hotdata sessions new [--name "Session Name"] [-o table|json|yaml] -hotdata sessions set [] -hotdata sessions read -hotdata sessions update [] [--name "New Name"] [--markdown "..."] [-o table|json|yaml] -hotdata sessions run [args...] -hotdata sessions run [args...] 
+hotdata sandbox list [-w ] [-o table|json|yaml] +hotdata sandbox [-w ] [-o table|json|yaml] +hotdata sandbox new [--name "Sandbox Name"] [-o table|json|yaml] +hotdata sandbox set [] +hotdata sandbox read +hotdata sandbox update [] [--name "New Name"] [--markdown "..."] [-o table|json|yaml] +hotdata sandbox run [args...] +hotdata sandbox run [args...] ``` -- `list` shows all sessions with a `*` marker on the active one. -- `new` creates a session and sets it as active. Blocked inside an existing session. -- `set` switches the active session. Omit the ID to clear. Blocked inside an existing session. -- `read` prints the markdown content of the current session. Use this to retrieve session state at the start of work or between steps. -- `update` modifies a session's name or markdown. Defaults to the active session if no ID is given. The `--markdown` field is for writing details about the work being done in the session — observations, intermediate findings, next steps, etc. This state persists for the life of the session and is the primary way to record context that should survive across commands or agent invocations within the session. -- `run` launches a command with `HOTDATA_SESSION` and `HOTDATA_WORKSPACE` set in the child process environment. Creates a new session unless a session ID is provided before `run`. Blocked inside an existing session. -- When inside a session (HOTDATA_SESSION is set), all API requests automatically include the session ID — no extra flags needed. +- `list` shows all sandboxes with a `*` marker on the active one. +- `new` creates a sandbox and sets it as active. Blocked inside an existing sandbox. +- `set` switches the active sandbox. Omit the ID to clear. Blocked inside an existing sandbox. +- `read` prints the markdown content of the current sandbox. Use this to retrieve sandbox state at the start of work or between steps. +- `update` modifies a sandbox's name or markdown. Defaults to the active sandbox if no ID is given. 
The `--markdown` field is for writing details about the work being done in the sandbox — observations, intermediate findings, next steps, etc. This state persists for the life of the sandbox and is the primary way to record context that should survive across commands or agent invocations within the sandbox. +- `run` launches a command with `HOTDATA_SANDBOX` and `HOTDATA_WORKSPACE` set in the child process environment. Creates a new sandbox unless a sandbox ID is provided before `run`. Blocked inside an existing sandbox. +- When inside a sandbox (HOTDATA_SANDBOX is set), all API requests automatically include the sandbox ID — no extra flags needed. -#### Example: Building a data model in a session +#### Example: Building a data model in a sandbox -Use a session to explore tables and iteratively build a model description in the session markdown. +Use a sandbox to explore tables and iteratively build a model description in the sandbox markdown. -1. Start a session: +1. Start a sandbox: ``` - hotdata sessions new --name "Model: sales pipeline" + hotdata sandbox new --name "Model: sales pipeline" ``` 2. Inspect tables and columns: ``` @@ -318,9 +311,9 @@ Use a session to explore tables and iteratively build a model description in the hotdata query "SELECT DISTINCT status FROM sales.public.deals LIMIT 20" hotdata query "SELECT count(*), count(DISTINCT account_id) FROM sales.public.deals" ``` -4. Write findings into the session markdown as you go: +4. Write findings into the sandbox markdown as you go: ``` - hotdata sessions update --markdown "## sales pipeline model + hotdata sandbox update --markdown "## sales pipeline model ### deals (sales.public.deals) - PK: id @@ -337,7 +330,7 @@ Use a session to explore tables and iteratively build a model description in the - check how line_items joins to deals - confirm revenue column semantics" ``` -5. Continue exploring and update the markdown as the model takes shape. 
The markdown is the living artifact — when the session ends, its content captures what was learned. +5. Continue exploring and update the markdown as the model takes shape. The markdown is the living artifact — when the sandbox ends, its content captures what was learned. Other commands (not covered in detail above): `hotdata connections new` (interactive connection wizard), `hotdata skills install|status`, `hotdata completions `. diff --git a/skills/hotdata/references/DATA_MODEL.template.md b/skills/hotdata/references/DATA_MODEL.template.md index b9fb25a..4b6aee4 100644 --- a/skills/hotdata/references/DATA_MODEL.template.md +++ b/skills/hotdata/references/DATA_MODEL.template.md @@ -76,14 +76,6 @@ Stable `datasets.main.*` tables built for **Chain** workflows (not necessarily u |------------|------------|---------|-------------| | | | | | -## Saved query index (Library) - -Link business questions to saved queries (ids/names from `hotdata queries list`): - -| Question / report | Saved query name | ID (optional) | -|-------------------|------------------|---------------| -| | | | - ## Notes Assumptions, known gaps, and refresh checklist. diff --git a/skills/hotdata/references/WORKFLOWS.md b/skills/hotdata/references/WORKFLOWS.md index 392fb7c..d238f49 100644 --- a/skills/hotdata/references/WORKFLOWS.md +++ b/skills/hotdata/references/WORKFLOWS.md @@ -1,14 +1,13 @@ # Hotdata CLI workflows -Procedures for **Model**, **Library**, **History**, **Chain**, and **Indexes**. These compose existing `hotdata` commands; they are not separate subcommands. +Procedures for **Model**, **History**, **Chain**, and **Indexes**. These compose existing `hotdata` commands; they are not separate subcommands. ## Where files live | Concept | Location | |--------|----------| | **Model** | Your **project** root or `docs/` (e.g. `DATA_MODEL.md` / `data_model.md`). Never store workspace-specific model text inside agent skill directories. 
|
-| **Library** | Hotdata **saved queries** (`queries create` / `list` / `run`). Optional local index (e.g. `QUERIES.md`) listing names and intent. |
-| **History** | `hotdata results list` / `results <result-id>`; saved queries. Optional append-only log under `.hotdata/query-log.jsonl` if you add a wrapper. |
+| **History** | `hotdata queries list` / `queries <run-id>` for query runs (execution history); `hotdata results list` / `results <result-id>` for row data. |
 | **Chain** | Intermediate tables in **`datasets.main.*`**; document stable ones in the Model file under **Derived tables (Chain)**. |
 | **Indexes** | Recommendations and decisions live in Hotdata (`indexes list` / `indexes create`). Optional project log (e.g. `INDEXES.md`) if you track rationale outside the catalog. |
 
@@ -52,36 +51,18 @@ hotdata query "SELECT * FROM ..
LIMIT 5" --- -## Library - -**Goal:** Repeatable SQL as **saved queries** so agents use `queries run` instead of pasting ad hoc SQL. - -### Promote a query +## History -```bash -hotdata queries create --name "Descriptive Name" --sql "SELECT ..." [--description "..."] [--tags "a,b"] -``` +**Goal:** Find prior work: query runs (execution history) and stored result rows. -### Use the library +### Query runs ```bash -hotdata queries list -hotdata queries -hotdata queries run -hotdata queries update [--name ...] [--sql ...] [--tags ...] [--category ...] [--table-size ...] +hotdata queries list [--limit N] [--cursor ] [--status ] +hotdata queries ``` -**Suggestions** from past sessions are not generated by the CLI today; capture candidates manually or with your own tooling, then `queries create` after review. - -### Optional project index - -Maintain `QUERIES.md` (or a section in `DATA_MODEL.md`) mapping **business questions** → saved query name or id. - ---- - -## History - -**Goal:** Find prior work: stored results and saved definitions. +`queries list` returns recent executions with status, duration, row count, and a SQL preview (default limit 20). Filter with `--status` (e.g. `--status failed`). The detail view shows full timings, the `result_id` (if any), and the formatted SQL. ### Results @@ -90,16 +71,7 @@ hotdata results list [-w ] [--limit N] [--offset N] hotdata results [-w ] ``` -Query footers include a `result-id` when applicable—record it for later. **Prefer `hotdata results ` over re-running identical heavy SQL.** - -### Saved queries as history - -```bash -hotdata queries list -hotdata queries -``` - -**Limitation:** Ad-hoc `hotdata query "..."` text is not listed unless you still have the `result_id`, a saved query, or a **local log** (e.g. append JSON lines to `.hotdata/query-log.jsonl` from a wrapper script). +Query footers include a `result-id` when applicable—record it for later, or pick it up from `queries `. 
**Prefer `hotdata results ` over re-running identical heavy SQL.** --- @@ -109,11 +81,9 @@ hotdata queries **Pattern:** materialize → query `datasets.main.*`. -1. **Base** — run saved or ad hoc SQL: +1. **Base** — run SQL: ```bash - hotdata queries run - # or hotdata query "SELECT ..." ``` @@ -147,15 +117,13 @@ hotdata queries ### 1. Gather workload and schema -- **Saved queries** — Inspect SQL for recurring `WHERE`, `JOIN`, `GROUP BY`, `ORDER BY`, and any use of full-text or vector access (e.g. SQL that calls `bm25_search`, or workloads you run via **`hotdata search`** — see main skill **Search**). +- **Query-run history** — Inspect recent runs for recurring `WHERE`, `JOIN`, `GROUP BY`, `ORDER BY`, and any use of full-text or vector access (e.g. SQL that calls `bm25_search`, or workloads you run via **`hotdata search`** — see main skill **Search**). ```bash hotdata queries list - hotdata queries + hotdata queries ``` -- **Ad-hoc SQL** — Use the same lens on queries from session history or a local log, if you keep one (see **History**). 
- - **Table/column types** — Confirm columns exist and types fit the index you plan: ```bash diff --git a/src/api.rs b/src/api.rs index dc46266..a3a5a7e 100644 --- a/src/api.rs +++ b/src/api.rs @@ -8,7 +8,7 @@ pub struct ApiClient { api_key: String, pub api_url: String, workspace_id: Option, - session_id: Option, + sandbox_id: Option, } impl ApiClient { @@ -36,12 +36,12 @@ impl ApiClient { api_key, api_url: profile_config.api_url.to_string(), workspace_id: workspace_id.map(String::from), - session_id: std::env::var("HOTDATA_SESSION").ok().or_else(|| { - if crate::sessions::find_session_run_ancestor().is_some() { - eprintln!("error: session has been lost -- restart the process"); + sandbox_id: std::env::var("HOTDATA_SANDBOX").ok().or_else(|| { + if crate::sandbox::find_sandbox_run_ancestor().is_some() { + eprintln!("error: sandbox has been lost -- restart the process"); std::process::exit(1); } - profile_config.session + profile_config.sandbox }), } } @@ -56,7 +56,7 @@ impl ApiClient { if let Some(ref ws) = self.workspace_id { headers.push(("X-Workspace-Id", ws.clone())); } - if let Some(ref sid) = self.session_id { + if let Some(ref sid) = self.sandbox_id { headers.push(("X-Session-Id", sid.clone())); } headers @@ -74,7 +74,7 @@ impl ApiClient { if let Some(ref ws) = self.workspace_id { req = req.header("X-Workspace-Id", ws); } - if let Some(ref sid) = self.session_id { + if let Some(ref sid) = self.sandbox_id { req = req.header("X-Session-Id", sid); } req @@ -208,66 +208,6 @@ impl ApiClient { util::debug_response(resp) } - /// POST request with no body (e.g. execute endpoints), returns parsed response. 
- pub fn post_empty(&self, path: &str) -> T { - let url = format!("{}{path}", self.api_url); - self.log_request("POST", &url, None); - - let resp = match self.build_request(reqwest::Method::POST, &url).send() { - Ok(r) => r, - Err(e) => { - eprintln!("error connecting to API: {e}"); - std::process::exit(1); - } - }; - - let (status, resp_body) = util::debug_response(resp); - if !status.is_success() { - eprintln!("{}", util::api_error(resp_body).red()); - std::process::exit(1); - } - - match serde_json::from_str(&resp_body) { - Ok(v) => v, - Err(e) => { - eprintln!("error parsing response: {e}"); - std::process::exit(1); - } - } - } - - /// PUT request with JSON body, returns parsed response. - pub fn put(&self, path: &str, body: &serde_json::Value) -> T { - let url = format!("{}{path}", self.api_url); - self.log_request("PUT", &url, Some(body)); - - let resp = match self.build_request(reqwest::Method::PUT, &url) - .json(body) - .send() - { - Ok(r) => r, - Err(e) => { - eprintln!("error connecting to API: {e}"); - std::process::exit(1); - } - }; - - let (status, resp_body) = util::debug_response(resp); - if !status.is_success() { - eprintln!("{}", util::api_error(resp_body).red()); - std::process::exit(1); - } - - match serde_json::from_str(&resp_body) { - Ok(v) => v, - Err(e) => { - eprintln!("error parsing response: {e}"); - std::process::exit(1); - } - } - } - - /// PATCH request with JSON body, returns parsed response. 
pub fn patch(&self, path: &str, body: &serde_json::Value) -> T { let url = format!("{}{path}", self.api_url); diff --git a/src/command.rs b/src/command.rs index 62d56c6..facc427 100644 --- a/src/command.rs +++ b/src/command.rs @@ -159,12 +159,12 @@ pub enum Commands { output: String, }, - /// Manage saved queries + /// Inspect query run history Queries { - /// Query ID to show details + /// Query run ID to show details id: Option, - /// Output format (used with query ID) + /// Output format (used with query run ID) #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "yaml"])] output: String, @@ -172,9 +172,9 @@ pub enum Commands { command: Option, }, - /// Manage work sessions - Sessions { - /// Session ID to show details + /// Manage sandboxes + Sandbox { + /// Sandbox ID to show details id: Option, /// Workspace ID (defaults to first workspace from login) @@ -186,7 +186,7 @@ pub enum Commands { output: String, #[command(subcommand)] - command: Option, + command: Option, }, /// Generate shell completions @@ -476,82 +476,19 @@ pub enum ResultsCommands { #[derive(Subcommand)] pub enum QueriesCommands { - /// List saved queries + /// List query runs List { /// Maximum number of results - #[arg(long)] - limit: Option, - - /// Pagination offset - #[arg(long)] - offset: Option, - - /// Output format - #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "yaml"])] - output: String, - }, - - /// Create a new saved query - Create { - /// Query name - #[arg(long)] - name: String, - - /// SQL query string - #[arg(long)] - sql: String, - - /// Query description - #[arg(long)] - description: Option, - - /// Comma-separated tags - #[arg(long)] - tags: Option, - - /// Output format - #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "yaml"])] - output: String, - }, - - /// Execute a saved query - Run { - /// Saved query ID - id: String, - - /// Output 
format - #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "csv"])] - output: String, - }, - - /// Update a saved query - Update { - /// Saved query ID - id: String, - - /// New query name - #[arg(long)] - name: Option, - - /// New SQL query string - #[arg(long)] - sql: Option, - - /// New description - #[arg(long)] - description: Option, - - /// Comma-separated tags - #[arg(long)] - tags: Option, + #[arg(long, default_value_t = 20)] + limit: u32, - /// Override the auto-detected category (pass empty string to clear) + /// Pagination cursor from a previous response #[arg(long)] - category: Option, + cursor: Option, - /// User annotation for table size (pass empty string to clear) + /// Filter by status (comma-separated, e.g. running,failed) #[arg(long)] - table_size: Option, + status: Option, /// Output format #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "yaml"])] @@ -560,17 +497,17 @@ pub enum QueriesCommands { } #[derive(Subcommand)] -pub enum SessionsCommands { - /// List all sessions in a workspace +pub enum SandboxCommands { + /// List all sandboxes in a workspace List { /// Output format #[arg(long = "output", short = 'o', default_value = "table", value_parser = ["table", "json", "yaml"])] output: String, }, - /// Create a new session and set it as active + /// Create a new sandbox and set it as active New { - /// Session name + /// Sandbox name #[arg(long)] name: Option, @@ -579,12 +516,12 @@ pub enum SessionsCommands { output: String, }, - /// Update a session's markdown or name + /// Update a sandbox's markdown or name Update { - /// Session ID (defaults to active session) + /// Sandbox ID (defaults to active sandbox) id: Option, - /// New session name + /// New sandbox name #[arg(long)] name: Option, @@ -597,20 +534,20 @@ pub enum SessionsCommands { output: String, }, - /// Print the markdown content of the current session + /// Print the markdown content of the 
current sandbox Read, - /// Set the active session (omit ID to clear) + /// Set the active sandbox (omit ID to clear) Set { - /// Session ID to set as active (omit to clear) + /// Sandbox ID to set as active (omit to clear) id: Option, }, - /// Run a command with a hotdata session. Creates a new session unless an ID was provided. - /// Example: hotdata sessions run claude - /// Example: hotdata sessions run claude + /// Run a command inside a hotdata sandbox. Creates a new sandbox unless an ID was provided. + /// Example: hotdata sandbox run claude + /// Example: hotdata sandbox run claude Run { - /// Session name (only used when creating a new session) + /// Sandbox name (only used when creating a new sandbox) #[arg(long)] name: Option, diff --git a/src/config.rs b/src/config.rs index c0aaa2b..0bf73c1 100644 --- a/src/config.rs +++ b/src/config.rs @@ -107,8 +107,8 @@ pub struct ProfileConfig { pub api_key_source: ApiKeySource, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub workspaces: Vec, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub session: Option, + #[serde(default, skip_serializing_if = "Option::is_none", alias = "session")] + pub sandbox: Option, } #[derive(Debug, Deserialize, Serialize)] @@ -215,7 +215,7 @@ pub fn save_default_workspace(profile: &str, workspace: WorkspaceEntry) -> Resul write_config(&config_path, &content) } -pub fn save_session(profile: &str, session_id: &str) -> Result<(), String> { +pub fn save_sandbox(profile: &str, sandbox_id: &str) -> Result<(), String> { let config_path = config_path()?; let mut config_file: ConfigFile = if config_path.exists() { @@ -230,14 +230,14 @@ pub fn save_session(profile: &str, session_id: &str) -> Result<(), String> { .profiles .entry(profile.to_string()) .or_default() - .session = Some(session_id.to_string()); + .sandbox = Some(sandbox_id.to_string()); let content = serde_yaml::to_string(&config_file) .map_err(|e| format!("error serializing config: {e}"))?; 
write_config(&config_path, &content) } -pub fn clear_session(profile: &str) -> Result<(), String> { +pub fn clear_sandbox(profile: &str) -> Result<(), String> { let config_path = config_path()?; if !config_path.exists() { @@ -250,7 +250,7 @@ pub fn clear_session(profile: &str) -> Result<(), String> { serde_yaml::from_str(&content).map_err(|e| format!("error parsing config file: {e}"))?; if let Some(entry) = config_file.profiles.get_mut(profile) { - entry.session = None; + entry.sandbox = None; } let content = serde_yaml::to_string(&config_file) diff --git a/src/main.rs b/src/main.rs index bf0f84e..53d999f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,7 +11,7 @@ mod jobs; mod queries; mod query; mod results; -mod sessions; +mod sandbox; mod skill; mod table; mod tables; @@ -20,7 +20,7 @@ mod workspace; use anstyle::AnsiColor; use clap::{Parser, builder::Styles}; -use command::{AuthCommands, Commands, ConnectionsCommands, ConnectionsCreateCommands, DatasetsCommands, IndexesCommands, JobsCommands, QueriesCommands, QueryCommands, ResultsCommands, SessionsCommands, SkillCommands, TablesCommands, WorkspaceCommands}; +use command::{AuthCommands, Commands, ConnectionsCommands, ConnectionsCreateCommands, DatasetsCommands, IndexesCommands, JobsCommands, QueriesCommands, QueryCommands, ResultsCommands, SandboxCommands, SkillCommands, TablesCommands, WorkspaceCommands}; #[derive(Parser)] #[command(name = "hotdata", version, about = concat!("Hotdata CLI - Command line interface for Hotdata (v", env!("CARGO_PKG_VERSION"), ")"), long_about = None, disable_version_flag = true)] @@ -35,7 +35,7 @@ struct Cli { api_key: Option, /// Print verbose API request and response details - #[arg(long, global = true)] + #[arg(long, global = true, hide = true)] debug: bool, #[command(subcommand)] @@ -53,7 +53,7 @@ fn resolve_workspace(provided: Option) -> String { } return ws; } - if sessions::find_session_run_ancestor().is_some() { + if sandbox::find_sandbox_run_ancestor().is_some() { 
eprintln!("error: workspace has been lost -- restart the process"); std::process::exit(1); } @@ -313,17 +313,8 @@ fn main() { queries::get(&id, &workspace_id, &output) } else { match command { - Some(QueriesCommands::List { limit, offset, output }) => { - queries::list(&workspace_id, limit, offset, &output) - } - Some(QueriesCommands::Run { id, output }) => { - queries::run(&id, &workspace_id, &output) - } - Some(QueriesCommands::Create { name, sql, description, tags, output }) => { - queries::create(&workspace_id, &name, &sql, description.as_deref(), tags.as_deref(), &output) - } - Some(QueriesCommands::Update { id, name, sql, description, tags, category, table_size, output }) => { - queries::update(&workspace_id, &id, name.as_deref(), sql.as_deref(), description.as_deref(), tags.as_deref(), category.as_deref(), table_size.as_deref(), &output) + Some(QueriesCommands::List { limit, cursor, status, output }) => { + queries::list(&workspace_id, Some(limit), cursor.as_deref(), status.as_deref(), &output) } None => { use clap::CommandFactory; @@ -334,55 +325,55 @@ fn main() { } } } - Commands::Sessions { id, workspace_id, output, command } => { + Commands::Sandbox { id, workspace_id, output, command } => { let workspace_id = resolve_workspace(workspace_id); match command { - Some(SessionsCommands::Run { name, cmd }) => { - sessions::run(id.as_deref(), &workspace_id, name.as_deref(), &cmd) + Some(SandboxCommands::Run { name, cmd }) => { + sandbox::run(id.as_deref(), &workspace_id, name.as_deref(), &cmd) } - Some(SessionsCommands::List { output }) => { - sessions::list(&workspace_id, &output) + Some(SandboxCommands::List { output }) => { + sandbox::list(&workspace_id, &output) } - Some(SessionsCommands::New { name, output }) => { - sessions::new(&workspace_id, name.as_deref(), &output) + Some(SandboxCommands::New { name, output }) => { + sandbox::new(&workspace_id, name.as_deref(), &output) } - Some(SessionsCommands::Update { id: update_id, name, markdown, output }) => { 
- let session_id = update_id.or(id).or_else(|| { - config::load("default").ok().and_then(|p| p.session) + Some(SandboxCommands::Update { id: update_id, name, markdown, output }) => { + let sandbox_id = update_id.or(id).or_else(|| { + config::load("default").ok().and_then(|p| p.sandbox) }); - match session_id { - Some(sid) => sessions::update(&workspace_id, &sid, name.as_deref(), markdown.as_deref(), &output), + match sandbox_id { + Some(sid) => sandbox::update(&workspace_id, &sid, name.as_deref(), markdown.as_deref(), &output), None => { - eprintln!("error: no session ID provided and no active session set. Use 'sessions new' or 'sessions set '."); + eprintln!("error: no sandbox ID provided and no active sandbox set. Use 'sandbox new' or 'sandbox set '."); std::process::exit(1); } } } - Some(SessionsCommands::Read) => { - let session_id = id.or_else(|| { - std::env::var("HOTDATA_SESSION").ok() + Some(SandboxCommands::Read) => { + let sandbox_id = id.or_else(|| { + std::env::var("HOTDATA_SANDBOX").ok() }).or_else(|| { - config::load("default").ok().and_then(|p| p.session) + config::load("default").ok().and_then(|p| p.sandbox) }); - match session_id { - Some(sid) => sessions::read(&sid, &workspace_id), + match sandbox_id { + Some(sid) => sandbox::read(&sid, &workspace_id), None => { - eprintln!("error: no active session. Use 'sessions new' or 'sessions set '."); + eprintln!("error: no active sandbox. 
Use 'sandbox new' or 'sandbox set '."); std::process::exit(1); } } } - Some(SessionsCommands::Set { id: set_id }) => { - sessions::set(set_id.as_deref(), &workspace_id) + Some(SandboxCommands::Set { id: set_id }) => { + sandbox::set(set_id.as_deref(), &workspace_id) } None => { match id { - Some(id) => sessions::get(&id, &workspace_id, &output), + Some(id) => sandbox::get(&id, &workspace_id, &output), None => { use clap::CommandFactory; let mut cmd = Cli::command(); cmd.build(); - cmd.find_subcommand_mut("sessions").unwrap().print_help().unwrap(); + cmd.find_subcommand_mut("sandbox").unwrap().print_help().unwrap(); } } } diff --git a/src/queries.rs b/src/queries.rs index 104fdb5..63259b2 100644 --- a/src/queries.rs +++ b/src/queries.rs @@ -1,7 +1,6 @@ use crate::api::ApiClient; -use crossterm::style::Stylize; +use crossterm::style::{Color, Stylize}; use serde::{Deserialize, Serialize}; -use serde_json::Value; const SQL_KEYWORDS: &[&str] = &[ "SELECT", "FROM", "WHERE", "AND", "OR", "NOT", "IN", "IS", "NULL", "AS", @@ -109,114 +108,149 @@ fn highlight_sql(sql: &str) -> String { } #[derive(Deserialize, Serialize)] -struct SavedQuery { +struct QueryRun { id: String, - name: String, - description: String, - tags: Vec, - latest_version: u64, + status: String, created_at: String, - updated_at: String, -} - -#[derive(Deserialize, Serialize)] -struct SavedQueryDetail { - id: String, - name: String, - description: String, - sql: String, + completed_at: Option, + execution_time_ms: Option, + server_processing_ms: Option, + row_count: Option, + saved_query_id: Option, + saved_query_version: Option, + snapshot_id: String, sql_hash: String, - tags: Vec, - latest_version: u64, - #[serde(default)] - category: Value, - #[serde(default)] - has_aggregation: Value, - #[serde(default)] - has_group_by: Value, - #[serde(default)] - has_join: Value, - #[serde(default)] - has_limit: Value, - #[serde(default)] - has_order_by: Value, - #[serde(default)] - has_predicate: Value, - 
#[serde(default)] - num_tables: Value, - #[serde(default)] - table_size: Value, - created_at: String, - updated_at: String, + sql_text: String, + result_id: Option, + error_message: Option, + warning_message: Option, + trace_id: Option, + user_public_id: Option, } #[derive(Deserialize)] struct ListResponse { - queries: Vec, + query_runs: Vec, count: u64, has_more: bool, + next_cursor: Option, +} + +fn color_status(status: &str) -> String { + let color = match status { + "succeeded" => Color::Green, + "failed" => Color::Red, + "running" | "queued" | "pending" => Color::Yellow, + _ => Color::Reset, + }; + status.with(color).to_string() } -pub fn list(workspace_id: &str, limit: Option, offset: Option, format: &str) { +fn truncate_sql(sql: &str, max: usize) -> String { + let flat = sql.split_whitespace().collect::>().join(" "); + if flat.chars().count() <= max { + flat + } else { + let prefix: String = flat.chars().take(max.saturating_sub(1)).collect(); + format!("{prefix}…") + } +} + +pub fn list( + workspace_id: &str, + limit: Option, + cursor: Option<&str>, + status: Option<&str>, + format: &str, +) { let api = ApiClient::new(Some(workspace_id)); + let params = [ ("limit", limit.map(|l| l.to_string())), - ("offset", offset.map(|o| o.to_string())), + ("cursor", cursor.map(str::to_string)), + ("status", status.map(str::to_string)), ]; - let body: ListResponse = api.get_with_params("/queries", ¶ms); + let body: ListResponse = api.get_with_params("/query-runs", ¶ms); match format { - "json" => println!("{}", serde_json::to_string_pretty(&body.queries).unwrap()), - "yaml" => print!("{}", serde_yaml::to_string(&body.queries).unwrap()), + "json" => println!("{}", serde_json::to_string_pretty(&body.query_runs).unwrap()), + "yaml" => print!("{}", serde_yaml::to_string(&body.query_runs).unwrap()), "table" => { - if body.queries.is_empty() { - eprintln!("{}", "No saved queries found.".dark_grey()); + if body.query_runs.is_empty() { + eprintln!("{}", "No query runs 
found.".dark_grey()); } else { - let rows: Vec> = body.queries.iter().map(|q| vec![ - q.id.clone(), - q.name.clone(), - q.description.clone(), - q.tags.join(", "), - q.latest_version.to_string(), - crate::util::format_date(&q.updated_at), + let rows: Vec> = body.query_runs.iter().map(|r| vec![ + r.id.clone(), + color_status(&r.status), + crate::util::format_date(&r.created_at), + r.execution_time_ms.map(|ms| ms.to_string()).unwrap_or_else(|| "-".to_string()), + r.row_count.map(|n| n.to_string()).unwrap_or_else(|| "-".to_string()), + truncate_sql(&r.sql_text, 60), ]).collect(); - crate::table::print(&["ID", "NAME", "DESCRIPTION", "TAGS", "VERSION", "UPDATED"], &rows); + crate::table::print(&["ID", "STATUS", "CREATED", "DURATION_MS", "ROWS", "SQL"], &rows); } if body.has_more { - let next = offset.unwrap_or(0) + body.count as u32; - eprintln!("{}", format!("showing {} results — use --offset {next} for more", body.count).dark_grey()); + let next = body.next_cursor.as_deref().unwrap_or(""); + eprintln!("{}", format!("showing {} results — use --cursor {next} for more", body.count).dark_grey()); } } _ => unreachable!(), } } -pub fn get(query_id: &str, workspace_id: &str, format: &str) { +pub fn get(query_run_id: &str, workspace_id: &str, format: &str) { let api = ApiClient::new(Some(workspace_id)); - let path = format!("/queries/{query_id}"); - let q: SavedQueryDetail = api.get(&path); - print_detail(&q, format); + let path = format!("/query-runs/{query_run_id}"); + let run: QueryRun = api.get(&path); + print_detail(&run, format); } -fn print_detail(q: &SavedQueryDetail, format: &str) { +fn print_detail(r: &QueryRun, format: &str) { match format { - "json" => println!("{}", serde_json::to_string_pretty(q).unwrap()), - "yaml" => print!("{}", serde_yaml::to_string(q).unwrap()), + "json" => println!("{}", serde_json::to_string_pretty(r).unwrap()), + "yaml" => print!("{}", serde_yaml::to_string(r).unwrap()), "table" => { - let label = |l: &str| format!("{:<12}", 
l).dark_grey().to_string(); - println!("{}{}", label("id:"), q.id); - println!("{}{}", label("name:"), q.name); - println!("{}{}", label("description:"), q.description); - println!("{}{}", label("version:"), q.latest_version); - if !q.tags.is_empty() { - println!("{}{}", label("tags:"), q.tags.join(", ")); + let label = |l: &str| format!("{:<14}", l).dark_grey().to_string(); + println!("{}{}", label("id:"), r.id); + println!("{}{}", label("status:"), color_status(&r.status)); + println!("{}{}", label("created:"), crate::util::format_date(&r.created_at)); + if let Some(ref c) = r.completed_at { + println!("{}{}", label("completed:"), crate::util::format_date(c)); + } + if let Some(ms) = r.execution_time_ms { + println!("{}{} ms", label("duration:"), ms); + } + if let Some(ms) = r.server_processing_ms { + println!("{}{} ms", label("server time:"), ms); + } + if let Some(n) = r.row_count { + println!("{}{}", label("rows:"), n); + } + if let Some(ref id) = r.result_id { + println!("{}{}", label("result id:"), id); + } + if let Some(ref id) = r.saved_query_id { + let version = r.saved_query_version.map(|v| format!(" (v{v})")).unwrap_or_default(); + println!("{}{}{}", label("saved query:"), id, version); + } + println!("{}{}", label("snapshot:"), r.snapshot_id); + println!("{}{}", label("sql hash:"), r.sql_hash); + if let Some(ref id) = r.trace_id { + println!("{}{}", label("trace:"), id); + } + if let Some(ref id) = r.user_public_id { + println!("{}{}", label("user:"), id); + } + if let Some(ref msg) = r.warning_message { + println!("{}{}", label("warning:"), msg.as_str().yellow()); + } + if let Some(ref msg) = r.error_message { + println!("{}{}", label("error:"), msg.as_str().red()); } - println!("{}{}", label("created:"), crate::util::format_date(&q.created_at)); - println!("{}{}", label("updated:"), crate::util::format_date(&q.updated_at)); println!(); println!("{}", "SQL:".dark_grey()); let formatted = sqlformat::format( - &q.sql, + &r.sql_text, 
&sqlformat::QueryParams::None, &sqlformat::FormatOptions { indent: sqlformat::Indent::Spaces(2), @@ -230,75 +264,3 @@ fn print_detail(q: &SavedQueryDetail, format: &str) { _ => unreachable!(), } } - -fn parse_tags(tags: Option<&str>) -> Option> { - tags.map(|t| t.split(',').map(str::trim).collect()) -} - -pub fn create( - workspace_id: &str, - name: &str, - sql: &str, - description: Option<&str>, - tags: Option<&str>, - format: &str, -) { - let api = ApiClient::new(Some(workspace_id)); - - let mut body = serde_json::json!({ "name": name, "sql": sql }); - if let Some(d) = description { body["description"] = serde_json::json!(d); } - if let Some(tags) = parse_tags(tags) { body["tags"] = serde_json::json!(tags); } - - let q: SavedQueryDetail = api.post("/queries", &body); - - println!("{}", "Query created".green()); - print_detail(&q, format); -} - -pub fn update( - workspace_id: &str, - id: &str, - name: Option<&str>, - sql: Option<&str>, - description: Option<&str>, - tags: Option<&str>, - category: Option<&str>, - table_size: Option<&str>, - format: &str, -) { - if name.is_none() && sql.is_none() && description.is_none() && tags.is_none() && category.is_none() && table_size.is_none() { - eprintln!("error: no fields to update. 
Provide at least one of --name, --sql, --description, --tags, --category, or --table-size."); - std::process::exit(1); - } - - let api = ApiClient::new(Some(workspace_id)); - - let mut body = serde_json::json!({}); - if let Some(n) = name { body["name"] = serde_json::json!(n); } - if let Some(s) = sql { body["sql"] = serde_json::json!(s); } - if let Some(d) = description { body["description"] = serde_json::json!(d); } - if let Some(tags) = parse_tags(tags) { body["tags"] = serde_json::json!(tags); } - match category { - Some("") => { body["category_override"] = serde_json::json!(null); } - Some(c) => { body["category_override"] = serde_json::json!(c); } - None => {} - } - match table_size { - Some("") => { body["table_size_override"] = serde_json::json!(null); } - Some(ts) => { body["table_size_override"] = serde_json::json!(ts); } - None => {} - } - - let path = format!("/queries/{id}"); - let q: SavedQueryDetail = api.put(&path, &body); - - println!("{}", "Query updated".green()); - print_detail(&q, format); -} - -pub fn run(query_id: &str, workspace_id: &str, format: &str) { - let api = ApiClient::new(Some(workspace_id)); - let path = format!("/queries/{query_id}/execute"); - let result: crate::query::QueryResponse = api.post_empty(&path); - crate::query::print_result(&result, format); -} diff --git a/src/sessions.rs b/src/sandbox.rs similarity index 68% rename from src/sessions.rs rename to src/sandbox.rs index 3f5ce4f..d1251dc 100644 --- a/src/sessions.rs +++ b/src/sandbox.rs @@ -4,7 +4,7 @@ use crossterm::style::Stylize; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] -struct Session { +struct Sandbox { public_id: String, name: String, markdown: String, @@ -14,31 +14,33 @@ struct Session { #[derive(Deserialize)] struct ListResponse { - sessions: Vec, + #[serde(rename = "sessions")] + sandboxes: Vec, } #[derive(Deserialize)] struct DetailResponse { - session: Session, + #[serde(rename = "session")] + sandbox: Sandbox, } pub fn 
list(workspace_id: &str, format: &str) { let api = ApiClient::new(Some(workspace_id)); let body: ListResponse = api.get("/sessions"); - let current_session = std::env::var("HOTDATA_SESSION") + let current_sandbox = std::env::var("HOTDATA_SANDBOX") .ok() - .or_else(|| config::load("default").ok().and_then(|p| p.session)); + .or_else(|| config::load("default").ok().and_then(|p| p.sandbox)); match format { - "json" => println!("{}", serde_json::to_string_pretty(&body.sessions).unwrap()), - "yaml" => print!("{}", serde_yaml::to_string(&body.sessions).unwrap()), + "json" => println!("{}", serde_json::to_string_pretty(&body.sandboxes).unwrap()), + "yaml" => print!("{}", serde_yaml::to_string(&body.sandboxes).unwrap()), "table" => { - if body.sessions.is_empty() { - eprintln!("{}", "No sessions found.".dark_grey()); + if body.sandboxes.is_empty() { + eprintln!("{}", "No sandboxes found.".dark_grey()); } else { - let rows: Vec> = body.sessions.iter().map(|s| { - let marker = if current_session.as_deref() == Some(&s.public_id) { "*" } else { "" }; + let rows: Vec> = body.sandboxes.iter().map(|s| { + let marker = if current_sandbox.as_deref() == Some(&s.public_id) { "*" } else { "" }; vec![ marker.to_string(), s.public_id.clone(), @@ -53,11 +55,11 @@ pub fn list(workspace_id: &str, format: &str) { } } -pub fn get(session_id: &str, workspace_id: &str, format: &str) { +pub fn get(sandbox_id: &str, workspace_id: &str, format: &str) { let api = ApiClient::new(Some(workspace_id)); - let path = format!("/sessions/{session_id}"); + let path = format!("/sessions/{sandbox_id}"); let body: DetailResponse = api.get(&path); - let s = &body.session; + let s = &body.sandbox; match format { "json" => println!("{}", serde_json::to_string_pretty(s).unwrap()), @@ -78,30 +80,30 @@ pub fn get(session_id: &str, workspace_id: &str, format: &str) { } } -pub fn read(session_id: &str, workspace_id: &str) { +pub fn read(sandbox_id: &str, workspace_id: &str) { let api = 
ApiClient::new(Some(workspace_id)); - let path = format!("/sessions/{session_id}"); + let path = format!("/sessions/{sandbox_id}"); let body: DetailResponse = api.get(&path); - if body.session.markdown.is_empty() { - eprintln!("{}", "Session markdown is empty.".dark_grey()); + if body.sandbox.markdown.is_empty() { + eprintln!("{}", "Sandbox markdown is empty.".dark_grey()); } else { - print!("{}", body.session.markdown); + print!("{}", body.sandbox.markdown); } } -fn check_session_lock() { - if std::env::var("HOTDATA_SESSION").is_ok() || find_session_run_ancestor().is_some() { - eprintln!("error: session is locked"); +fn check_sandbox_lock() { + if std::env::var("HOTDATA_SANDBOX").is_ok() || find_sandbox_run_ancestor().is_some() { + eprintln!("error: sandbox is locked"); std::process::exit(1); } } -pub fn find_session_run_ancestor() -> Option { +pub fn find_sandbox_run_ancestor() -> Option { static CACHED: std::sync::OnceLock> = std::sync::OnceLock::new(); - *CACHED.get_or_init(find_session_run_ancestor_inner) + *CACHED.get_or_init(find_sandbox_run_ancestor_inner) } -fn find_session_run_ancestor_inner() -> Option { +fn find_sandbox_run_ancestor_inner() -> Option { use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind}; let sys = System::new_with_specifics( @@ -117,7 +119,7 @@ fn find_session_run_ancestor_inner() -> Option { let proc = sys.process(pid)?; let name = proc.name().to_string_lossy(); if name == "hotdata" { - if proc.cmd().iter().any(|a| a == "sessions") + if proc.cmd().iter().any(|a| a == "sandbox") && proc.cmd().iter().any(|a| a == "run") { return Some(pid); @@ -129,7 +131,7 @@ fn find_session_run_ancestor_inner() -> Option { } pub fn new(workspace_id: &str, name: Option<&str>, format: &str) { - check_session_lock(); + check_sandbox_lock(); let api = ApiClient::new(Some(workspace_id)); let mut body = serde_json::json!({}); @@ -138,14 +140,14 @@ pub fn new(workspace_id: &str, name: Option<&str>, format: &str) { } let resp: DetailResponse = 
api.post("/sessions", &body); - let s = &resp.session; + let s = &resp.sandbox; - // Set as the active session in config - if let Err(e) = config::save_session("default", &s.public_id) { - eprintln!("warning: could not save session to config: {e}"); + // Set as the active sandbox in config + if let Err(e) = config::save_sandbox("default", &s.public_id) { + eprintln!("warning: could not save sandbox to config: {e}"); } - println!("{}", "Session created".green()); + println!("{}", "Sandbox created".green()); match format { "json" => println!("{}", serde_json::to_string_pretty(s).unwrap()), "yaml" => print!("{}", serde_yaml::to_string(s).unwrap()), @@ -159,7 +161,7 @@ pub fn new(workspace_id: &str, name: Option<&str>, format: &str) { } } -pub fn update(workspace_id: &str, session_id: &str, name: Option<&str>, markdown: Option<&str>, format: &str) { +pub fn update(workspace_id: &str, sandbox_id: &str, name: Option<&str>, markdown: Option<&str>, format: &str) { if name.is_none() && markdown.is_none() { eprintln!("error: provide at least one of --name or --markdown."); std::process::exit(1); @@ -171,11 +173,11 @@ pub fn update(workspace_id: &str, session_id: &str, name: Option<&str>, markdown if let Some(n) = name { body["name"] = serde_json::json!(n); } if let Some(m) = markdown { body["markdown"] = serde_json::json!(m); } - let path = format!("/sessions/{session_id}"); + let path = format!("/sessions/{sandbox_id}"); let resp: DetailResponse = api.patch(&path, &body); - let s = &resp.session; + let s = &resp.sandbox; - println!("{}", "Session updated".green()); + println!("{}", "Sandbox updated".green()); match format { "json" => println!("{}", serde_json::to_string_pretty(s).unwrap()), "yaml" => print!("{}", serde_yaml::to_string(s).unwrap()), @@ -189,34 +191,34 @@ pub fn update(workspace_id: &str, session_id: &str, name: Option<&str>, markdown } } -pub fn run(session_id: Option<&str>, workspace_id: &str, name: Option<&str>, cmd: &[String]) { - check_session_lock(); - 
let sid = match session_id { +pub fn run(sandbox_id: Option<&str>, workspace_id: &str, name: Option<&str>, cmd: &[String]) { + check_sandbox_lock(); + let sid = match sandbox_id { Some(id) => { - // Verify the session exists + // Verify the sandbox exists let api = ApiClient::new(Some(workspace_id)); let path = format!("/sessions/{id}"); let _: DetailResponse = api.get(&path); id.to_string() } None => { - // Create a new session + // Create a new sandbox let api = ApiClient::new(Some(workspace_id)); let mut body = serde_json::json!({}); if let Some(n) = name { body["name"] = serde_json::json!(n); } let resp: DetailResponse = api.post("/sessions", &body); - resp.session.public_id + resp.sandbox.public_id } }; - eprintln!("{} {}", "session:".dark_grey(), sid); + eprintln!("{} {}", "sandbox:".dark_grey(), sid); eprintln!("{} {}", "workspace:".dark_grey(), workspace_id); let status = std::process::Command::new(&cmd[0]) .args(&cmd[1..]) - .env("HOTDATA_SESSION", &sid) + .env("HOTDATA_SANDBOX", &sid) .env("HOTDATA_WORKSPACE", workspace_id) .status(); @@ -234,41 +236,41 @@ mod tests { use super::*; #[test] - fn find_session_run_ancestor_returns_none_in_test() { - // No `hotdata sessions run` ancestor exists in the test runner - assert!(find_session_run_ancestor_inner().is_none()); + fn find_sandbox_run_ancestor_returns_none_in_test() { + // No `hotdata sandbox run` ancestor exists in the test runner + assert!(find_sandbox_run_ancestor_inner().is_none()); } #[test] - fn find_session_run_ancestor_cached_matches_inner() { + fn find_sandbox_run_ancestor_cached_matches_inner() { // The cached version should agree with the inner function - assert_eq!(find_session_run_ancestor(), find_session_run_ancestor_inner()); + assert_eq!(find_sandbox_run_ancestor(), find_sandbox_run_ancestor_inner()); } } -pub fn set(session_id: Option<&str>, workspace_id: &str) { - check_session_lock(); - match session_id { +pub fn set(sandbox_id: Option<&str>, workspace_id: &str) { + 
check_sandbox_lock(); + match sandbox_id { Some(id) => { - // Verify the session exists by fetching it + // Verify the sandbox exists by fetching it let api = ApiClient::new(Some(workspace_id)); let path = format!("/sessions/{id}"); let _: DetailResponse = api.get(&path); - if let Err(e) = config::save_session("default", id) { + if let Err(e) = config::save_sandbox("default", id) { eprintln!("error saving config: {e}"); std::process::exit(1); } - println!("{}", "Active session updated".green()); + println!("{}", "Active sandbox updated".green()); println!("id: {}", id); } None => { - // Clear the active session - if let Err(e) = config::clear_session("default") { + // Clear the active sandbox + if let Err(e) = config::clear_sandbox("default") { eprintln!("error saving config: {e}"); std::process::exit(1); } - println!("{}", "Active session cleared".green()); + println!("{}", "Active sandbox cleared".green()); } } } diff --git a/src/workspace.rs b/src/workspace.rs index 5b53697..6649706 100644 --- a/src/workspace.rs +++ b/src/workspace.rs @@ -17,7 +17,7 @@ struct ListResponse { } pub fn set(workspace_id: Option<&str>) { - if std::env::var("HOTDATA_WORKSPACE").is_ok() || crate::sessions::find_session_run_ancestor().is_some() { + if std::env::var("HOTDATA_WORKSPACE").is_ok() || crate::sandbox::find_sandbox_run_ancestor().is_some() { eprintln!("error: workspace is locked"); std::process::exit(1); } diff --git a/tests/session_env.rs b/tests/sandbox_env.rs similarity index 66% rename from tests/session_env.rs rename to tests/sandbox_env.rs index 32533b8..c1d5b07 100644 --- a/tests/session_env.rs +++ b/tests/sandbox_env.rs @@ -4,48 +4,48 @@ fn hotdata() -> Command { Command::new(env!("CARGO_BIN_EXE_hotdata")) } -// --- session lock tests --- +// --- sandbox lock tests --- #[test] -fn sessions_run_blocked_when_hotdata_session_set() { +fn sandbox_run_blocked_when_hotdata_sandbox_set() { let output = hotdata() - .args(["sessions", "run", "echo", "hi"]) - 
.env("HOTDATA_SESSION", "existing-session") + .args(["sandbox", "run", "echo", "hi"]) + .env("HOTDATA_SANDBOX", "existing-sandbox") .env("HOTDATA_WORKSPACE", "ws-1") .output() .unwrap(); assert!(!output.status.success()); let stderr = String::from_utf8_lossy(&output.stderr); - assert!(stderr.contains("session is locked"), "stderr: {stderr}"); + assert!(stderr.contains("sandbox is locked"), "stderr: {stderr}"); } #[test] -fn sessions_new_blocked_when_hotdata_session_set() { +fn sandbox_new_blocked_when_hotdata_sandbox_set() { let output = hotdata() - .args(["sessions", "new"]) - .env("HOTDATA_SESSION", "existing-session") + .args(["sandbox", "new"]) + .env("HOTDATA_SANDBOX", "existing-sandbox") .env("HOTDATA_WORKSPACE", "ws-1") .output() .unwrap(); assert!(!output.status.success()); let stderr = String::from_utf8_lossy(&output.stderr); - assert!(stderr.contains("session is locked"), "stderr: {stderr}"); + assert!(stderr.contains("sandbox is locked"), "stderr: {stderr}"); } #[test] -fn sessions_set_blocked_when_hotdata_session_set() { +fn sandbox_set_blocked_when_hotdata_sandbox_set() { let output = hotdata() - .args(["sessions", "set", "some-id"]) - .env("HOTDATA_SESSION", "existing-session") + .args(["sandbox", "set", "some-id"]) + .env("HOTDATA_SANDBOX", "existing-sandbox") .env("HOTDATA_WORKSPACE", "ws-1") .output() .unwrap(); assert!(!output.status.success()); let stderr = String::from_utf8_lossy(&output.stderr); - assert!(stderr.contains("session is locked"), "stderr: {stderr}"); + assert!(stderr.contains("sandbox is locked"), "stderr: {stderr}"); } // --- workspace env lock tests --- @@ -53,9 +53,9 @@ fn sessions_set_blocked_when_hotdata_session_set() { #[test] fn workspace_env_blocks_conflicting_flag() { let output = hotdata() - .args(["sessions", "-w", "other-ws", "list"]) + .args(["sandbox", "-w", "other-ws", "list"]) .env("HOTDATA_WORKSPACE", "locked-ws") - .env_remove("HOTDATA_SESSION") + .env_remove("HOTDATA_SANDBOX") .output() .unwrap(); @@ -72,9 +72,9 
@@ fn workspace_env_allows_matching_flag() { // When the flag matches the env var, no workspace conflict error. // Will fail later on auth, but should NOT fail on workspace lock. let output = hotdata() - .args(["sessions", "-w", "ws-1", "list"]) + .args(["sandbox", "-w", "ws-1", "list"]) .env("HOTDATA_WORKSPACE", "ws-1") - .env_remove("HOTDATA_SESSION") + .env_remove("HOTDATA_SANDBOX") .output() .unwrap();