diff --git a/.claude/agents/api-reviewer.md b/.claude/agents/api-reviewer.md new file mode 100644 index 00000000..eb414ac8 --- /dev/null +++ b/.claude/agents/api-reviewer.md @@ -0,0 +1,27 @@ +--- +name: api-reviewer +description: Reviews stacker REST API endpoints for design consistency, validation, and RBAC coverage. +tools: + - Read + - Grep + - Glob +--- + +You are a REST API design reviewer for the stacker platform. + +When API endpoints are added or modified: + +1. Check routing setup in src/startup.rs +2. Verify request validation using serde_valid +3. Check Casbin policy in access_control.conf covers the new endpoint +4. Verify response format matches existing API conventions +5. Check error responses use consistent error types +6. Verify pagination, filtering, and sorting follow existing patterns +7. Check rate limiting and authentication middleware applied + +Output a review: +- **Route**: method + path +- **Auth**: Casbin policy configured? Middleware applied? +- **Validation**: request body validated? Query params validated? +- **Response**: consistent format? Proper status codes? +- **Breaking Changes**: does this change existing API contracts? diff --git a/.claude/agents/code-reviewer.md b/.claude/agents/code-reviewer.md new file mode 100644 index 00000000..5dd73dbf --- /dev/null +++ b/.claude/agents/code-reviewer.md @@ -0,0 +1,24 @@ +--- +name: code-reviewer +description: Reviews stacker Rust code for safety, SQL injection, auth gaps, and API correctness. +tools: + - Read + - Grep + - Glob +--- + +You are a senior Rust code reviewer for the stacker platform API. + +Check for: +1. **SQL Safety** — all queries use sqlx macros (compile-time checked), no string interpolation in SQL +2. **Auth/RBAC** — new endpoints have Casbin policy entries, middleware applied correctly +3. **Memory Safety** — proper ownership, no unsafe blocks without justification +4. **Error Handling** — Result types propagated, no unwrap() in production paths +5. 
**Async Correctness** — no blocking calls in async context, proper tokio spawning +6. **Secret Safety** — Vault secrets not logged or leaked in responses +7. **SSH Security** — key material properly handled and cleaned up +8. **API Design** — proper HTTP methods, status codes, request validation with serde_valid +9. **Migration Safety** — new migrations have both up and down scripts +10. **Test Coverage** — new code paths have tests + +Output: severity-rated findings with file:line references. diff --git a/.claude/agents/migration-checker.md b/.claude/agents/migration-checker.md new file mode 100644 index 00000000..b03a7d2c --- /dev/null +++ b/.claude/agents/migration-checker.md @@ -0,0 +1,27 @@ +--- +name: migration-checker +description: Validates sqlx PostgreSQL migrations for the stacker service. Checks for data safety and rollback correctness. +tools: + - Read + - Grep + - Glob +--- + +You are a PostgreSQL migration specialist reviewing sqlx migrations for a production Rust service. + +When a migration is created or modified: + +1. Read both .up.sql and .down.sql files in migrations/ +2. Check for destructive operations: DROP TABLE, DROP COLUMN, ALTER TYPE +3. Verify the .down.sql correctly reverses the .up.sql +4. Check for long-running locks (adding NOT NULL, creating indexes on large tables) +5. Verify new columns have sensible defaults or are nullable +6. Cross-reference with sqlx queries in src/ that reference affected tables +7. Check that `cargo sqlx prepare` has been run (sqlx-data.json updated) + +Output a safety report: +- **Risk Level**: LOW / MEDIUM / HIGH / CRITICAL +- **Destructive Operations**: list any data-loss risks +- **Lock Duration**: estimate for production table sizes +- **Rollback Safety**: does .down.sql correctly reverse changes? +- **Query Compatibility**: do existing sqlx queries still compile? 
diff --git a/.claude/agents/planner.md b/.claude/agents/planner.md new file mode 100644 index 00000000..3888ac95 --- /dev/null +++ b/.claude/agents/planner.md @@ -0,0 +1,29 @@ +--- +name: planner +description: Plans changes for the stacker Rust service. Understands Actix-web, sqlx, Casbin RBAC, and the project/stack domain model. +tools: + - Read + - Grep + - Glob + - LS +--- + +You are a senior Rust engineer planning changes for the stacker platform API. + +This is the core service: Actix-web REST API with sqlx (PostgreSQL), Casbin RBAC, Redis caching, RabbitMQ messaging, and SSH remote management. + +1. Research the existing codebase — start with src/lib.rs and src/startup.rs for routing +2. Check existing patterns in project_app/, forms/, connectors/, middleware/ +3. Review sqlx migrations in migrations/ for schema understanding +4. Check Casbin policies in access_control.conf +5. Create a step-by-step implementation plan +6. Identify risks: SQL migration conflicts, auth policy gaps, breaking API changes + +RULES: +- NEVER write code. Only plan. +- ALWAYS check sqlx query patterns (compile-time checked) +- ALWAYS consider Casbin RBAC implications for new endpoints +- ALWAYS plan new migrations for schema changes +- Flag any changes to middleware/ (affects all routes) +- Consider backward compatibility for REST API changes +- Estimate complexity of each step (small / medium / large) diff --git a/.claude/agents/rbac-reviewer.md b/.claude/agents/rbac-reviewer.md new file mode 100644 index 00000000..2b724a84 --- /dev/null +++ b/.claude/agents/rbac-reviewer.md @@ -0,0 +1,26 @@ +--- +name: rbac-reviewer +description: Reviews Casbin RBAC policies and authorization middleware in stacker. +tools: + - Read + - Grep + - Glob +--- + +You are an access control specialist reviewing Casbin RBAC in an Actix-web service. + +When authorization changes are made: + +1. Read access_control.conf for current policy definitions +2. 
Read src/middleware/authorization.rs for enforcement logic +3. Check that new endpoints have corresponding policy entries +4. Verify role hierarchy is correct +5. Check for privilege escalation paths +6. Verify policy changes don't remove access needed by existing features + +Output an RBAC review: +- **New Endpoints**: policy entries present for all new routes? +- **Role Coverage**: all roles have appropriate access levels? +- **Escalation Risk**: any path from lower to higher privilege? +- **Policy Consistency**: no conflicting or redundant rules? +- **Middleware Applied**: authorization middleware on all protected routes? diff --git a/.claude/agents/tester.md b/.claude/agents/tester.md new file mode 100644 index 00000000..4ebdaf00 --- /dev/null +++ b/.claude/agents/tester.md @@ -0,0 +1,28 @@ +--- +name: tester +description: Writes and runs cargo tests for stacker. Uses wiremock for HTTP mocking and sqlx test fixtures. +tools: + - Read + - Write + - Bash + - Grep + - Glob +--- + +You are a QA engineer for a Rust/Actix-web API service. + +1. Read existing test patterns in src/project_app/tests.rs and other test modules +2. Write new tests following Rust testing conventions +3. Run the FULL test suite: `cargo test` +4. 
Report: what passed, what failed, root cause analysis + +RULES: +- TDD: Write failing test FIRST, then verify it fails, then implement fix +- ALWAYS run full suite: `cargo test` +- Use wiremock for mocking external HTTP services +- Use mockito for simple HTTP mocks +- Use sqlx test fixtures for database tests +- FOLLOW existing test patterns exactly +- Do NOT modify existing passing tests unless explicitly asked +- Test error paths: invalid input, auth failures, database errors +- Use `SQLX_OFFLINE=true cargo test` if no database available diff --git a/.claude/agents/vault-auditor.md b/.claude/agents/vault-auditor.md new file mode 100644 index 00000000..7e027756 --- /dev/null +++ b/.claude/agents/vault-auditor.md @@ -0,0 +1,25 @@ +--- +name: vault-auditor +description: Audits Vault secrets integration in stacker. Checks src/project_app/vault.rs and all Vault access patterns. +tools: + - Read + - Grep + - Glob +--- + +You are a secrets management specialist auditing HashiCorp Vault integration in a Rust service. + +When Vault-related code is changed: + +1. Read src/project_app/vault.rs for core Vault logic +2. Grep for all Vault access patterns across the codebase +3. Check that secrets are never logged, serialized to responses, or stored in plain text +4. Verify Vault token renewal and error handling +5. Check that Vault paths follow naming conventions +6. 
Verify secrets are properly scoped (not over-privileged) + +Output an audit report: +- **Secret Exposure**: any paths where secrets could leak (logs, responses, errors) +- **Token Management**: proper renewal, expiration handling +- **Error Handling**: graceful degradation when Vault is unavailable +- **Access Scope**: secrets access follows least-privilege diff --git a/.sqlx/query-3fd71974a7948b85a0fa72d2c583e29118c63af715e14d9b0a50ef672b8b4d97.json b/.sqlx/query-3fd71974a7948b85a0fa72d2c583e29118c63af715e14d9b0a50ef672b8b4d97.json new file mode 100644 index 00000000..a9c47849 --- /dev/null +++ b/.sqlx/query-3fd71974a7948b85a0fa72d2c583e29118c63af715e14d9b0a50ef672b8b4d97.json @@ -0,0 +1,77 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM project\n WHERE name=$1 AND user_id=$2\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "stack_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "user_id", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "name", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "metadata", + "type_info": "Json" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "request_json", + "type_info": "Json" + }, + { + "ordinal": 8, + "name": "source_template_id", + "type_info": "Uuid" + }, + { + "ordinal": 9, + "name": "template_version", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true + ] + }, + "hash": "3fd71974a7948b85a0fa72d2c583e29118c63af715e14d9b0a50ef672b8b4d97" +} diff --git a/.sqlx/query-535d270d0a7dbfea6f82e6448d5812d656a22fbb29d0309e907b7a260dc491d3.json 
b/.sqlx/query-535d270d0a7dbfea6f82e6448d5812d656a22fbb29d0309e907b7a260dc491d3.json new file mode 100644 index 00000000..ae32e4be --- /dev/null +++ b/.sqlx/query-535d270d0a7dbfea6f82e6448d5812d656a22fbb29d0309e907b7a260dc491d3.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE cloud SET name = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "535d270d0a7dbfea6f82e6448d5812d656a22fbb29d0309e907b7a260dc491d3" +} diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..c9bcfdbe --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,103 @@ +# Stacker + +Core platform API service. Manages projects, stacks, cloud deployments, user access control, and marketplace. Exposes REST API consumed by the blog frontend and admin UI. + +## Tech Stack +- **Language**: Rust (2021 edition) +- **Framework**: Actix-web 4.3.1 +- **Database**: PostgreSQL (sqlx 0.8.2 with compile-time checked queries) +- **Auth**: Casbin RBAC (casbin 2.2.0, actix-casbin-auth) +- **Async**: Tokio (full features) +- **Message Queue**: RabbitMQ (lapin + deadpool-lapin) +- **Cache**: Redis (redis 0.27.5 with tokio-comp) +- **SSH**: russh 0.58 (remote server management) +- **Templates**: Tera 1.19.1 +- **Crypto**: AES-GCM, HMAC-SHA256, Ed25519 SSH keys +- **Validation**: serde_valid 0.18.0 +- **Testing**: wiremock, mockito, assert_cmd + +## Project Structure +``` +src/ + lib.rs # Library root + main.rs # Server binary entry + configuration.rs # Config loading (configuration.yaml) + startup.rs # Server initialization + telemetry.rs # Tracing/logging setup + banner.rs # Startup banner + project_app/ # Core project/stack management + upsert.rs # Create/update projects + mapping.rs # Data mapping + hydration.rs # Data hydration from DB + vault.rs # Vault secrets integration + tests.rs # Module tests + forms/ # Request validation + cloud.rs # Cloud provider forms + server.rs # Server forms + 
connectors/ # External service connectors + dockerhub_service.rs # DockerHub API + config.rs # Connector configuration + errors.rs # Error types + middleware/ # HTTP middleware + authorization.rs # Casbin RBAC middleware + mod.rs # Middleware registration +migrations/ # sqlx PostgreSQL migrations (up/down pairs) +configuration.yaml # Runtime configuration +access_control.conf # Casbin RBAC policy +``` + +## Binaries +- **server** — main API server (Actix-web) +- **console** — admin console commands +- **stacker-cli** — CLI tool for stack management + +## Commands +```bash +# Build (offline mode for CI without DB) +SQLX_OFFLINE=true cargo build + +# Run tests +cargo test + +# Run specific test +cargo test test_name + +# Run with features +cargo test --features explain + +# Database migrations +sqlx migrate run +sqlx migrate revert + +# Prepare offline query data +cargo sqlx prepare + +# Format & lint +cargo fmt +cargo clippy -- -D warnings + +# Run server +cargo run --bin server + +# Run CLI +cargo run --bin stacker-cli -- +``` + +## Critical Rules +- NEVER modify migration .up.sql/.down.sql files that have been applied to production +- ALWAYS create new migration files for schema changes: `sqlx migrate add ` +- ALWAYS run `cargo sqlx prepare` after changing any sqlx queries +- ALWAYS use compile-time checked queries with sqlx macros +- ALWAYS test with `cargo test` after every change +- Casbin policies in access_control.conf must be reviewed for any auth changes +- SSH key operations must handle cleanup on failure +- Vault secrets must never be logged or serialized to responses +- Use `SQLX_OFFLINE=true` for builds without database access +- Do not yet add to repo .claude CLAUDE.md .copilot related files + +## Agents +- Use `planner` before any feature work or refactoring +- Use `tester` after every code change (must run cargo test) +- Use `code-reviewer` before commits — focus on security and SQL safety +- Use `migration-checker` for any database schema changes +- 
Use `api-reviewer` when adding or modifying REST endpoints \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 1710d4c4..dc382334 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5785,7 +5785,7 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "stacker" -version = "0.2.6" +version = "0.2.7" dependencies = [ "actix", "actix-casbin-auth", diff --git a/Cargo.toml b/Cargo.toml index 6d702d24..97b2031c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stacker" -version = "0.2.6" +version = "0.2.7" edition = "2021" default-run= "server" diff --git a/src/bin/stacker.rs b/src/bin/stacker.rs index 96dabcad..5f943341 100644 --- a/src/bin/stacker.rs +++ b/src/bin/stacker.rs @@ -581,6 +581,48 @@ enum PipeCommands { #[arg(long)] deployment: Option, }, + /// Activate a pipe instance (start listening for triggers) + Activate { + /// Pipe instance ID (UUID) + pipe_id: String, + /// Trigger type: webhook, poll, or manual + #[arg(long, default_value = "webhook")] + trigger: String, + /// Poll interval in seconds (only for --trigger=poll) + #[arg(long, default_value = "300")] + poll_interval: u32, + /// Output in JSON format + #[arg(long)] + json: bool, + /// Deployment hash + #[arg(long)] + deployment: Option, + }, + /// Deactivate a pipe instance (stop listening) + Deactivate { + /// Pipe instance ID (UUID) + pipe_id: String, + /// Output in JSON format + #[arg(long)] + json: bool, + /// Deployment hash + #[arg(long)] + deployment: Option, + }, + /// Trigger a pipe instance manually (one-shot execution) + Trigger { + /// Pipe instance ID (UUID) + pipe_id: String, + /// Optional JSON input data to feed into the pipe + #[arg(long)] + data: Option, + /// Output in JSON format + #[arg(long)] + json: bool, + /// Deployment hash + #[arg(long)] + deployment: Option, + }, } #[derive(Debug, Subcommand)] @@ -1134,6 +1176,15 @@ fn get_command( PipeCommands::List { json, deployment } => Box::new( 
pipe::PipeListCommand::new(json, deployment), ), + PipeCommands::Activate { pipe_id, trigger, poll_interval, json, deployment } => Box::new( + pipe::PipeActivateCommand::new(pipe_id, trigger, poll_interval, json, deployment), + ), + PipeCommands::Deactivate { pipe_id, json, deployment } => Box::new( + pipe::PipeDeactivateCommand::new(pipe_id, json, deployment), + ), + PipeCommands::Trigger { pipe_id, data, json, deployment } => Box::new( + pipe::PipeTriggerCommand::new(pipe_id, data, json, deployment), + ), } }, StackerCommands::Agent { command: agent_cmd } => { diff --git a/src/cli/deployment_lock.rs b/src/cli/deployment_lock.rs index 159f550f..29dc5c96 100644 --- a/src/cli/deployment_lock.rs +++ b/src/cli/deployment_lock.rs @@ -11,9 +11,14 @@ use crate::cli::install_runner::DeployResult; // DeploymentLock — persisted deployment context // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -/// Filename for the deployment lockfile inside `.stacker/`. +/// Legacy filename for the deployment lockfile inside `.stacker/`. pub const LOCKFILE_NAME: &str = "deployment.lock"; +/// Returns the per-target lockfile name, e.g. `deployment-cloud.lock`. +pub fn lockfile_name_for_target(target: &str) -> String { + format!("deployment-{}.lock", target) +} + /// Persisted deployment context written after a successful deploy. /// /// Lives in `.stacker/deployment.lock` and allows subsequent deploys @@ -140,14 +145,21 @@ impl DeploymentLock { // ── Persistence ────────────────────────────────── - /// Resolve the lockfile path inside `.stacker/` relative to the project dir. + /// Resolve the per-target lockfile path (e.g. `.stacker/deployment-cloud.lock`). + pub fn lockfile_path_for_target(project_dir: &Path, target: &str) -> PathBuf { + project_dir + .join(".stacker") + .join(lockfile_name_for_target(target)) + } + + /// Legacy lockfile path (`.stacker/deployment.lock`). 
pub fn lockfile_path(project_dir: &Path) -> PathBuf { project_dir.join(".stacker").join(LOCKFILE_NAME) } - /// Save the lock to `.stacker/deployment.lock`. + /// Save the lock to `.stacker/deployment-{target}.lock`. pub fn save(&self, project_dir: &Path) -> Result { - let path = Self::lockfile_path(project_dir); + let path = Self::lockfile_path_for_target(project_dir, &self.target); // Ensure .stacker/ exists if let Some(parent) = path.parent() { @@ -163,17 +175,34 @@ impl DeploymentLock { Ok(path) } - /// Load a deployment lock from `.stacker/deployment.lock`. - /// Returns `None` if the file does not exist. - pub fn load(project_dir: &Path) -> Result, CliError> { - let path = Self::lockfile_path(project_dir); + /// Load a deployment lock for a specific target. + /// Falls back to the legacy `deployment.lock` if the per-target file doesn't exist. + pub fn load_for_target(project_dir: &Path, target: &str) -> Result, CliError> { + let target_path = Self::lockfile_path_for_target(project_dir, target); + if target_path.exists() { + let content = std::fs::read_to_string(&target_path).map_err(CliError::Io)?; + let lock: Self = serde_yaml::from_str(&content).map_err(|e| { + CliError::ConfigValidation(format!( + "Failed to parse deployment lock ({}): {}. Delete the file and redeploy.", + target_path.display(), + e + )) + })?; + return Ok(Some(lock)); + } + // Fallback: try legacy deployment.lock (only if its target matches) + Self::load_legacy(project_dir, Some(target)) + } + + /// Load the legacy `deployment.lock`, optionally filtering by target. + fn load_legacy(project_dir: &Path, filter_target: Option<&str>) -> Result, CliError> { + let path = Self::lockfile_path(project_dir); if !path.exists() { return Ok(None); } let content = std::fs::read_to_string(&path).map_err(CliError::Io)?; - let lock: Self = serde_yaml::from_str(&content).map_err(|e| { CliError::ConfigValidation(format!( "Failed to parse deployment lock ({}): {}. 
Delete the file and redeploy.", @@ -182,11 +211,49 @@ impl DeploymentLock { )) })?; + if let Some(target) = filter_target { + if lock.target != target { + return Ok(None); + } + } + Ok(Some(lock)) } - /// Check whether a lockfile exists for this project. + /// Load a deployment lock from `.stacker/deployment.lock` (legacy). + /// Returns `None` if the file does not exist. + pub fn load(project_dir: &Path) -> Result, CliError> { + // Try all per-target files first, then fall back to legacy + for target in &["cloud", "server", "local"] { + let target_path = Self::lockfile_path_for_target(project_dir, target); + if target_path.exists() { + let content = std::fs::read_to_string(&target_path).map_err(CliError::Io)?; + let lock: Self = serde_yaml::from_str(&content).map_err(|e| { + CliError::ConfigValidation(format!( + "Failed to parse deployment lock ({}): {}. Delete the file and redeploy.", + target_path.display(), + e + )) + })?; + return Ok(Some(lock)); + } + } + + Self::load_legacy(project_dir, None) + } + + /// Check whether a lockfile exists for a given target. + pub fn exists_for_target(project_dir: &Path, target: &str) -> bool { + Self::lockfile_path_for_target(project_dir, target).exists() + } + + /// Check whether any lockfile exists for this project (per-target or legacy). 
pub fn exists(project_dir: &Path) -> bool { + for target in &["cloud", "server", "local"] { + if Self::lockfile_path_for_target(project_dir, target).exists() { + return true; + } + } Self::lockfile_path(project_dir).exists() } @@ -277,8 +344,9 @@ mod tests { let path = lock.save(tmp.path()).unwrap(); assert!(path.exists()); + assert!(path.ends_with("deployment-cloud.lock")); - let loaded = DeploymentLock::load(tmp.path()).unwrap().unwrap(); + let loaded = DeploymentLock::load_for_target(tmp.path(), "cloud").unwrap().unwrap(); assert_eq!(loaded.server_ip, lock.server_ip); assert_eq!(loaded.deployment_id, lock.deployment_id); assert_eq!(loaded.project_id, lock.project_id); @@ -291,15 +359,75 @@ mod tests { let tmp = TempDir::new().unwrap(); let result = DeploymentLock::load(tmp.path()).unwrap(); assert!(result.is_none()); + let result = DeploymentLock::load_for_target(tmp.path(), "cloud").unwrap(); + assert!(result.is_none()); } #[test] fn exists_detection() { let tmp = TempDir::new().unwrap(); assert!(!DeploymentLock::exists(tmp.path())); + assert!(!DeploymentLock::exists_for_target(tmp.path(), "cloud")); sample_lock().save(tmp.path()).unwrap(); assert!(DeploymentLock::exists(tmp.path())); + assert!(DeploymentLock::exists_for_target(tmp.path(), "cloud")); + assert!(!DeploymentLock::exists_for_target(tmp.path(), "local")); + } + + #[test] + fn local_and_cloud_locks_coexist() { + let tmp = TempDir::new().unwrap(); + + // Save cloud lock + let cloud_lock = sample_lock(); + cloud_lock.save(tmp.path()).unwrap(); + + // Save local lock + let local_lock = DeploymentLock::for_local(); + local_lock.save(tmp.path()).unwrap(); + + // Both exist + assert!(DeploymentLock::exists_for_target(tmp.path(), "cloud")); + assert!(DeploymentLock::exists_for_target(tmp.path(), "local")); + + // Load each independently + let loaded_cloud = DeploymentLock::load_for_target(tmp.path(), "cloud").unwrap().unwrap(); + assert_eq!(loaded_cloud.server_ip, Some("203.0.113.42".to_string())); + 
assert_eq!(loaded_cloud.deployment_id, Some(123)); + + let loaded_local = DeploymentLock::load_for_target(tmp.path(), "local").unwrap().unwrap(); + assert_eq!(loaded_local.server_ip, Some("127.0.0.1".to_string())); + assert_eq!(loaded_local.deployment_id, None); + + // Generic load() prefers cloud over local + let generic = DeploymentLock::load(tmp.path()).unwrap().unwrap(); + assert_eq!(generic.target, "cloud"); + } + + #[test] + fn legacy_lockfile_fallback() { + let tmp = TempDir::new().unwrap(); + + // Manually write a legacy deployment.lock + let stacker_dir = tmp.path().join(".stacker"); + std::fs::create_dir_all(&stacker_dir).unwrap(); + let legacy_lock = sample_lock(); + let content = serde_yaml::to_string(&legacy_lock).unwrap(); + std::fs::write(stacker_dir.join("deployment.lock"), &content).unwrap(); + + // load_for_target("cloud") should find it via legacy fallback + let loaded = DeploymentLock::load_for_target(tmp.path(), "cloud").unwrap().unwrap(); + assert_eq!(loaded.target, "cloud"); + assert_eq!(loaded.deployment_id, Some(123)); + + // load_for_target("local") should NOT find it (target mismatch) + let loaded_local = DeploymentLock::load_for_target(tmp.path(), "local").unwrap(); + assert!(loaded_local.is_none()); + + // Generic load() should find the legacy file + let generic = DeploymentLock::load(tmp.path()).unwrap().unwrap(); + assert_eq!(generic.target, "cloud"); } #[test] diff --git a/src/cli/install_runner.rs b/src/cli/install_runner.rs index e71376f9..13f014f5 100644 --- a/src/cli/install_runner.rs +++ b/src/cli/install_runner.rs @@ -601,7 +601,7 @@ impl DeployStrategy for CloudDeploy { ) .await?; eprintln!( - " Saved cloud credentials (id={})", + " Saved/updated cloud credentials (id={})", saved.id ); Some(saved.id) diff --git a/src/cli/stacker_client.rs b/src/cli/stacker_client.rs index 42af2bac..7fd0b30f 100644 --- a/src/cli/stacker_client.rs +++ b/src/cli/stacker_client.rs @@ -186,6 +186,93 @@ pub struct DeploymentStatusInfo { pub 
updated_at: String, } +/// Pipe template info from `/api/v1/pipes/templates` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PipeTemplateInfo { + pub id: String, + pub name: String, + #[serde(default)] + pub description: Option, + pub source_app_type: String, + pub source_endpoint: serde_json::Value, + pub target_app_type: String, + pub target_endpoint: serde_json::Value, + #[serde(default)] + pub target_external_url: Option, + pub field_mapping: serde_json::Value, + #[serde(default)] + pub config: Option, + #[serde(default)] + pub is_public: Option, + pub created_by: String, + pub created_at: String, + pub updated_at: String, +} + +/// Pipe instance info from `/api/v1/pipes/instances` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PipeInstanceInfo { + pub id: String, + #[serde(default)] + pub template_id: Option, + pub deployment_hash: String, + pub source_container: String, + #[serde(default)] + pub target_container: Option, + #[serde(default)] + pub target_url: Option, + #[serde(default)] + pub field_mapping_override: Option, + #[serde(default)] + pub config_override: Option, + pub status: String, + #[serde(default)] + pub last_triggered_at: Option, + #[serde(default)] + pub trigger_count: i64, + #[serde(default)] + pub error_count: i64, + pub created_by: String, + pub created_at: String, + pub updated_at: String, +} + +/// Request body for creating a pipe template +#[derive(Debug, Clone, Serialize)] +pub struct CreatePipeTemplateApiRequest { + pub name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + pub source_app_type: String, + pub source_endpoint: serde_json::Value, + pub target_app_type: String, + pub target_endpoint: serde_json::Value, + #[serde(skip_serializing_if = "Option::is_none")] + pub target_external_url: Option, + pub field_mapping: serde_json::Value, + #[serde(skip_serializing_if = "Option::is_none")] + pub config: Option, + #[serde(skip_serializing_if = "Option::is_none")] + 
pub is_public: Option, +} + +/// Request body for creating a pipe instance +#[derive(Debug, Clone, Serialize)] +pub struct CreatePipeInstanceApiRequest { + pub deployment_hash: String, + pub source_container: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub target_container: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub target_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub template_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub field_mapping_override: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub config_override: Option, +} + // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ // StackerClient — HTTP client for the Stacker server // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -521,6 +608,7 @@ impl StackerClient { } /// Save cloud credentials to the Stacker server. + /// If credentials already exist for the provider, updates the existing record. pub async fn save_cloud( &self, provider: &str, @@ -528,9 +616,84 @@ impl StackerClient { cloud_key: Option<&str>, cloud_secret: Option<&str>, ) -> Result { + // Check if credentials already exist for this provider — update instead of insert + if let Some(existing) = self.find_cloud_by_provider(provider).await? { + return self.update_cloud( + existing.id, + provider, + &existing.name, + cloud_token, + cloud_key, + cloud_secret, + ).await; + } self.save_cloud_with_name(provider, None, cloud_token, cloud_key, cloud_secret).await } + /// Update existing cloud credentials by id. 
+ pub async fn update_cloud( + &self, + id: i32, + provider: &str, + name: &str, + cloud_token: Option<&str>, + cloud_key: Option<&str>, + cloud_secret: Option<&str>, + ) -> Result { + let url = format!("{}/cloud/{}", self.base_url, id); + + let mut payload = serde_json::json!({ + "provider": provider, + "name": name, + "save_token": true, + }); + + if let Some(obj) = payload.as_object_mut() { + if let Some(t) = cloud_token { + obj.insert("cloud_token".to_string(), serde_json::Value::String(t.to_string())); + } + if let Some(k) = cloud_key { + obj.insert("cloud_key".to_string(), serde_json::Value::String(k.to_string())); + } + if let Some(s) = cloud_secret { + obj.insert("cloud_secret".to_string(), serde_json::Value::String(s.to_string())); + } + } + + let resp = self + .http + .put(&url) + .bearer_auth(&self.token) + .json(&payload) + .send() + .await + .map_err(|e| CliError::DeployFailed { + target: crate::cli::config_parser::DeployTarget::Cloud, + reason: format!("Stacker server unreachable: {}", e), + })?; + + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::DeployFailed { + target: crate::cli::config_parser::DeployTarget::Cloud, + reason: format!("Stacker server PUT /cloud/{} failed ({}): {}", id, status, body), + }); + } + + let api: ApiResponse = resp.json().await.map_err(|e| { + CliError::DeployFailed { + target: crate::cli::config_parser::DeployTarget::Cloud, + reason: format!("Invalid response from Stacker server: {}", e), + } + })?; + + api.item.ok_or_else(|| CliError::DeployFailed { + target: crate::cli::config_parser::DeployTarget::Cloud, + reason: "Stacker server updated cloud but returned no item".to_string(), + }) + } + /// Save cloud credentials with an optional name. 
pub async fn save_cloud_with_name( &self, @@ -1350,6 +1513,224 @@ impl StackerClient { Ok((json, hash)) } + // ── Pipe management ───────────────────────────── + + /// List pipe instances for a deployment. + /// + /// `GET /api/v1/pipes/instances/{deployment_hash}` + pub async fn list_pipe_instances( + &self, + deployment_hash: &str, + ) -> Result, CliError> { + let url = format!( + "{}/api/v1/pipes/instances/{}", + self.base_url, deployment_hash + ); + let resp = self + .http + .get(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to list pipes: {}", e)))?; + + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "List pipes failed ({}): {}", + status, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid pipe list response: {}", e)))?; + + Ok(api.list.unwrap_or_default()) + } + + /// Get a pipe instance by ID. + /// + /// `GET /api/v1/pipes/instances/detail/{instance_id}` + pub async fn get_pipe_instance( + &self, + instance_id: &str, + ) -> Result, CliError> { + let url = format!( + "{}/api/v1/pipes/instances/detail/{}", + self.base_url, instance_id + ); + let resp = self + .http + .get(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to get pipe: {}", e)))?; + + if resp.status().as_u16() == 404 { + return Ok(None); + } + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "Get pipe failed ({}): {}", + status, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid pipe response: {}", e)))?; + + Ok(api.item) + } + + /// Create a pipe template. 
+ /// + /// `POST /api/v1/pipes/templates` + pub async fn create_pipe_template( + &self, + request: &CreatePipeTemplateApiRequest, + ) -> Result { + let url = format!("{}/api/v1/pipes/templates", self.base_url); + let resp = self + .http + .post(&url) + .bearer_auth(&self.token) + .json(request) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to create pipe template: {}", e)))?; + + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "Create pipe template failed ({}): {}", + status, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid template response: {}", e)))?; + + api.item.ok_or_else(|| CliError::ConfigValidation("Empty template response".to_string())) + } + + /// Create a pipe instance. + /// + /// `POST /api/v1/pipes/instances` + pub async fn create_pipe_instance( + &self, + request: &CreatePipeInstanceApiRequest, + ) -> Result { + let url = format!("{}/api/v1/pipes/instances", self.base_url); + let resp = self + .http + .post(&url) + .bearer_auth(&self.token) + .json(request) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to create pipe instance: {}", e)))?; + + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "Create pipe instance failed ({}): {}", + status, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid instance response: {}", e)))?; + + api.item.ok_or_else(|| CliError::ConfigValidation("Empty instance response".to_string())) + } + + /// Update pipe instance status. 
+ /// + /// `PUT /api/v1/pipes/instances/{instance_id}/status` + pub async fn update_pipe_status( + &self, + instance_id: &str, + status: &str, + ) -> Result { + let url = format!( + "{}/api/v1/pipes/instances/{}/status", + self.base_url, instance_id + ); + let body = serde_json::json!({ "status": status }); + let resp = self + .http + .put(&url) + .bearer_auth(&self.token) + .json(&body) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to update pipe status: {}", e)))?; + + if !resp.status().is_success() { + let status_code = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "Update pipe status failed ({}): {}", + status_code, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid status response: {}", e)))?; + + api.item.ok_or_else(|| CliError::ConfigValidation("Empty status response".to_string())) + } + + /// List pipe templates visible to the current user. 
+ /// + /// `GET /api/v1/pipes/templates` + pub async fn list_pipe_templates( + &self, + source_app_type: Option<&str>, + target_app_type: Option<&str>, + ) -> Result, CliError> { + let mut url = format!("{}/api/v1/pipes/templates", self.base_url); + let mut params = Vec::new(); + if let Some(source) = source_app_type { + params.push(format!("source_app_type={}", source)); + } + if let Some(target) = target_app_type { + params.push(format!("target_app_type={}", target)); + } + if !params.is_empty() { + url.push('?'); + url.push_str(¶ms.join("&")); + } + + let resp = self + .http + .get(&url) + .bearer_auth(&self.token) + .send() + .await + .map_err(|e| CliError::ConfigValidation(format!("Failed to list templates: {}", e)))?; + + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let body = resp.text().await.unwrap_or_default(); + return Err(CliError::ConfigValidation(format!( + "List templates failed ({}): {}", + status, body + ))); + } + + let api: ApiResponse = + resp.json().await.map_err(|e| CliError::ConfigValidation(format!("Invalid templates response: {}", e)))?; + + Ok(api.list.unwrap_or_default()) + } + // ── Marketplace (creator) ──────────────────────── /// List the current user's marketplace template submissions. 
diff --git a/src/configuration.rs b/src/configuration.rs index 2f740a12..3f3c1327 100644 --- a/src/configuration.rs +++ b/src/configuration.rs @@ -1,7 +1,7 @@ use crate::connectors::ConnectorConfig; use serde; -#[derive(Debug, Clone, serde::Deserialize)] +#[derive(Clone, serde::Deserialize)] pub struct Settings { pub database: DatabaseSettings, pub app_port: u16, @@ -28,6 +28,27 @@ pub struct Settings { pub deployment: DeploymentSettings, } +impl std::fmt::Debug for Settings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Settings") + .field("database", &self.database) + .field("app_port", &self.app_port) + .field("app_host", &self.app_host) + .field("auth_url", &self.auth_url) + .field("user_service_url", &self.user_service_url) + .field("max_clients_number", &self.max_clients_number) + .field("agent_command_poll_timeout_secs", &self.agent_command_poll_timeout_secs) + .field("agent_command_poll_interval_secs", &self.agent_command_poll_interval_secs) + .field("casbin_reload_enabled", &self.casbin_reload_enabled) + .field("casbin_reload_interval_secs", &self.casbin_reload_interval_secs) + .field("amqp", &self.amqp) + .field("vault", &self.vault) + .field("connectors", &self.connectors) + .field("deployment", &self.deployment) + .finish() + } +} + impl Default for Settings { fn default() -> Self { Self { @@ -71,7 +92,7 @@ impl Settings { } } -#[derive(Debug, serde::Deserialize, Clone)] +#[derive(serde::Deserialize, Clone)] pub struct DatabaseSettings { pub username: String, pub password: String, @@ -80,6 +101,18 @@ pub struct DatabaseSettings { pub database_name: String, } +impl std::fmt::Debug for DatabaseSettings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DatabaseSettings") + .field("username", &self.username) + .field("password", &"[REDACTED]") + .field("host", &self.host) + .field("port", &self.port) + .field("database_name", &self.database_name) + .finish() + } +} + impl 
Default for DatabaseSettings { fn default() -> Self { Self { @@ -92,7 +125,7 @@ impl Default for DatabaseSettings { } } -#[derive(Debug, serde::Deserialize, Clone)] +#[derive(serde::Deserialize, Clone)] pub struct AmqpSettings { pub username: String, pub password: String, @@ -100,6 +133,17 @@ pub struct AmqpSettings { pub port: u16, } +impl std::fmt::Debug for AmqpSettings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("AmqpSettings") + .field("username", &self.username) + .field("password", &"[REDACTED]") + .field("host", &self.host) + .field("port", &self.port) + .finish() + } +} + impl Default for AmqpSettings { fn default() -> Self { Self { @@ -145,7 +189,7 @@ impl DeploymentSettings { } } -#[derive(Debug, serde::Deserialize, Clone)] +#[derive(serde::Deserialize, Clone)] pub struct VaultSettings { pub address: String, pub token: String, @@ -156,6 +200,18 @@ pub struct VaultSettings { pub ssh_key_path_prefix: Option, } +impl std::fmt::Debug for VaultSettings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VaultSettings") + .field("address", &self.address) + .field("token", &"[REDACTED]") + .field("agent_path_prefix", &self.agent_path_prefix) + .field("api_prefix", &self.api_prefix) + .field("ssh_key_path_prefix", &self.ssh_key_path_prefix) + .finish() + } +} + impl Default for VaultSettings { fn default() -> Self { Self { diff --git a/src/console/commands/cli/config.rs b/src/console/commands/cli/config.rs index 661ccafb..7a8a3549 100644 --- a/src/console/commands/cli/config.rs +++ b/src/console/commands/cli/config.rs @@ -823,11 +823,11 @@ impl CallableTrait for ConfigLockCommand { let config_path_str = resolve_config_path(&self.file); let config_path = project_dir.join(&config_path_str); - // 1. Load lockfile + // 1. Load lockfile (prefer cloud/server) let lock = match DeploymentLock::load(&project_dir)? 
{ Some(l) => l, None => { - eprintln!("No deployment lock found (.stacker/deployment.lock)."); + eprintln!("No deployment lock found in .stacker/."); eprintln!("Deploy first with `stacker deploy`, then run this command."); return Ok(()); } diff --git a/src/console/commands/cli/deploy.rs b/src/console/commands/cli/deploy.rs index fb983925..4a1df406 100644 --- a/src/console/commands/cli/deploy.rs +++ b/src/console/commands/cli/deploy.rs @@ -921,17 +921,17 @@ pub fn run_deploy( }); } } - } else if DeploymentLock::exists(project_dir) { + } else if DeploymentLock::exists_for_target(project_dir, "cloud") || DeploymentLock::exists(project_dir) { // No deploy.server in config, but a lockfile exists from a prior deploy. // Auto-inject the server name so the cloud deploy API reuses the same server. - if let Ok(Some(lock)) = DeploymentLock::load(project_dir) { + if let Ok(Some(lock)) = DeploymentLock::load_for_target(project_dir, "cloud") { if let Some(ref name) = lock.server_name { - eprintln!(" ℹ Found previous deployment (server='{}') — reusing server", name); + eprintln!(" ℹ Found previous cloud deployment (server='{}') — reusing server", name); eprintln!(" To provision a new server instead: stacker deploy --force-new"); lock_server_name = Some(name.clone()); } else if let Some(ref ip) = lock.server_ip { if ip != "127.0.0.1" { - eprintln!(" ℹ Found previous deployment to {} (from .stacker/deployment.lock)", ip); + eprintln!(" ℹ Found previous deployment to {} (from deployment lock)", ip); eprintln!(" Server name unknown — cannot auto-reuse. 
Run: stacker config lock"); eprintln!(" To provision a new server instead: stacker deploy --force-new"); } diff --git a/src/console/commands/cli/pipe.rs b/src/console/commands/cli/pipe.rs index ba63ccc9..2e2738bd 100644 --- a/src/console/commands/cli/pipe.rs +++ b/src/console/commands/cli/pipe.rs @@ -11,7 +11,10 @@ use crate::cli::error::CliError; use crate::cli::fmt; use crate::cli::progress; use crate::cli::runtime::CliRuntime; -use crate::cli::stacker_client::{AgentCommandInfo, AgentEnqueueRequest}; +use crate::cli::stacker_client::{ + AgentCommandInfo, AgentEnqueueRequest, CreatePipeInstanceApiRequest, + CreatePipeTemplateApiRequest, +}; use crate::console::commands::CallableTrait; /// Default poll timeout for pipe probe commands (seconds). @@ -339,7 +342,7 @@ fn print_scan_result(info: &AgentCommandInfo) { } // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// stacker pipe create (placeholder for Phase 1) +// stacker pipe create — interactive pipe creation // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ pub struct PipeCreateCommand { @@ -368,6 +371,39 @@ impl PipeCreateCommand { } } +/// Extract operations from a probe result as a flat list of (method, path, summary, fields). 
+fn extract_operations(info: &AgentCommandInfo) -> Vec<(String, String, String, Vec)> { + let mut ops = Vec::new(); + if let Some(ref result) = info.result { + if let Some(endpoints) = result["endpoints"].as_array() { + for ep in endpoints { + let base = ep["base_url"].as_str().unwrap_or(""); + if let Some(operations) = ep["operations"].as_array() { + for op in operations { + let method = op["method"].as_str().unwrap_or("GET").to_string(); + let path = format!( + "{}{}", + base, + op["path"].as_str().unwrap_or("") + ); + let summary = op["summary"].as_str().unwrap_or("").to_string(); + let fields = op["fields"] + .as_array() + .map(|a| { + a.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(); + ops.push((method, path, summary, fields)); + } + } + } + } + } + ops +} + impl CallableTrait for PipeCreateCommand { fn call(&self) -> Result<(), Box> { let ctx = CliRuntime::new("pipe create")?; @@ -419,23 +455,193 @@ impl CallableTrait for PipeCreateCommand { PROBE_TIMEOUT_SECS, )?; - // Print results for both - println!("\n=== Source: {} ===", self.source); - print_scan_result(&source_info); + if source_info.status != "completed" || target_info.status != "completed" { + eprintln!("Scan failed for one or both apps. 
Cannot create pipe."); + if source_info.status != "completed" { + eprintln!(" Source '{}': {}", self.source, source_info.status); + } + if target_info.status != "completed" { + eprintln!(" Target '{}': {}", self.target, target_info.status); + } + return Ok(()); + } - println!("\n=== Target: {} ===", self.target); - print_scan_result(&target_info); + // Step 2: Extract discovered endpoints + let source_ops = extract_operations(&source_info); + let target_ops = extract_operations(&target_info); - // TODO Phase 1: Interactive matching + AI field mapping + pipe storage - println!("Interactive pipe creation will be available in the next release."); - println!("For now, use 'stacker pipe scan ' to discover endpoints."); + if source_ops.is_empty() { + eprintln!("No endpoints discovered on source app '{}'. Cannot create pipe.", self.source); + return Ok(()); + } + if target_ops.is_empty() { + eprintln!("No endpoints discovered on target app '{}'. Cannot create pipe.", self.target); + return Ok(()); + } + + // Step 3: Let user select source endpoint + let source_labels: Vec = source_ops + .iter() + .map(|(m, p, s, _)| { + if s.is_empty() { + format!("{:>6} {}", m, p) + } else { + format!("{:>6} {} — {}", m, p, s) + } + }) + .collect(); + + println!("\n Select source endpoint (data comes FROM here):"); + let source_idx = dialoguer::Select::new() + .items(&source_labels) + .default(0) + .interact()?; + + let (ref src_method, ref src_path, _, ref src_fields) = source_ops[source_idx]; + + // Step 4: Let user select target endpoint + let target_labels: Vec = target_ops + .iter() + .map(|(m, p, s, _)| { + if s.is_empty() { + format!("{:>6} {}", m, p) + } else { + format!("{:>6} {} — {}", m, p, s) + } + }) + .collect(); + + println!("\n Select target endpoint (data goes TO here):"); + let target_idx = dialoguer::Select::new() + .items(&target_labels) + .default(0) + .interact()?; + + let (ref tgt_method, ref tgt_path, _, ref tgt_fields) = target_ops[target_idx]; + + // Step 5: 
Build field mapping + let field_mapping = if !self.manual && !src_fields.is_empty() && !tgt_fields.is_empty() { + // Auto-suggest mapping by matching field names + println!("\n Auto-matching fields (source → target):"); + let mut mapping = serde_json::Map::new(); + for tgt_field in tgt_fields { + // Direct name match + if src_fields.contains(tgt_field) { + println!(" {} → {} ✓", tgt_field, tgt_field); + mapping.insert( + tgt_field.clone(), + serde_json::Value::String(format!("$.{}", tgt_field)), + ); + } + } + + // Show unmatched target fields + let unmatched: Vec<&String> = tgt_fields + .iter() + .filter(|f| !mapping.contains_key(*f)) + .collect(); + if !unmatched.is_empty() { + println!(" Unmatched target fields: {}", unmatched.iter().map(|s| s.as_str()).collect::>().join(", ")); + println!(" (You can edit the field mapping later via the API)"); + } + + if mapping.is_empty() { + // No auto-matches — create pass-through + println!(" No auto-matches found. Creating pass-through mapping."); + for sf in src_fields { + mapping.insert( + sf.clone(), + serde_json::Value::String(format!("$.{}", sf)), + ); + } + } + + serde_json::Value::Object(mapping) + } else { + // Manual mode or no fields discovered + println!("\n No field auto-matching available. 
Creating identity mapping."); + serde_json::json!({}) + }; + + // Step 6: Ask for pipe name + let default_name = format!("{}-to-{}", self.source, self.target); + let pipe_name: String = dialoguer::Input::new() + .with_prompt("Pipe name") + .default(default_name) + .interact_text()?; + + // Step 7: Create template via API + let template_request = CreatePipeTemplateApiRequest { + name: pipe_name.clone(), + description: Some(format!( + "{} {} → {} {}", + src_method, src_path, tgt_method, tgt_path + )), + source_app_type: self.source.clone(), + source_endpoint: serde_json::json!({ + "path": src_path, + "method": src_method, + }), + target_app_type: self.target.clone(), + target_endpoint: serde_json::json!({ + "path": tgt_path, + "method": tgt_method, + }), + target_external_url: None, + field_mapping: field_mapping.clone(), + config: Some(serde_json::json!({"retry_count": 3})), + is_public: Some(false), + }; + + let pb = progress::spinner("Creating pipe template..."); + let template = ctx.block_on(ctx.client.create_pipe_template(&template_request)) + .map_err(|e| { + progress::finish_error(&pb, "Template creation failed"); + e + })?; + progress::finish_success(&pb, "Template created"); + + // Step 8: Create instance linked to this deployment + let instance_request = CreatePipeInstanceApiRequest { + deployment_hash: hash.clone(), + source_container: self.source.clone(), + target_container: Some(self.target.clone()), + target_url: None, + template_id: Some(template.id.clone()), + field_mapping_override: None, + config_override: None, + }; + + let pb = progress::spinner("Creating pipe instance..."); + let instance = ctx.block_on(ctx.client.create_pipe_instance(&instance_request)) + .map_err(|e| { + progress::finish_error(&pb, "Instance creation failed"); + e + })?; + progress::finish_success(&pb, "Pipe instance created"); + + if self.json { + let output = serde_json::json!({ + "template": template, + "instance": instance, + }); + println!("{}", 
serde_json::to_string_pretty(&output)?); + } else { + println!("\n ✓ Pipe '{}' created successfully", pipe_name); + println!(" Template ID: {}", template.id); + println!(" Instance ID: {}", instance.id); + println!(" Source: {} ({})", self.source, src_path); + println!(" Target: {} ({})", self.target, tgt_path); + println!(" Status: {} (use 'stacker pipe activate {}' to start)", instance.status, instance.id); + println!(" Mapping: {}", serde_json::to_string(&field_mapping)?); + } Ok(()) } } // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// stacker pipe list (placeholder for Phase 1) +// stacker pipe list — list active pipes for a deployment // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ pub struct PipeListCommand { @@ -451,9 +657,294 @@ impl PipeListCommand { impl CallableTrait for PipeListCommand { fn call(&self) -> Result<(), Box> { - // TODO Phase 1: Query pipes from database - println!("No pipes configured yet."); - println!("Use 'stacker pipe create ' to create a pipe."); + let ctx = CliRuntime::new("pipe list")?; + let hash = resolve_deployment_hash(&self.deployment, &ctx)?; + + let pb = progress::spinner("Fetching pipes..."); + let pipes = ctx.block_on(ctx.client.list_pipe_instances(&hash)) + .map_err(|e| { + progress::finish_error(&pb, "Failed to fetch pipes"); + e + })?; + progress::finish_success(&pb, &format!("{} pipe(s) found", pipes.len())); + + if pipes.is_empty() { + println!("No pipes configured for this deployment."); + println!("Use 'stacker pipe create ' to create a pipe."); + return Ok(()); + } + + if self.json { + println!("{}", serde_json::to_string_pretty(&pipes)?); + return Ok(()); + } + + // Table header + println!( + "\n{:<38} {:<15} {:<15} {:<10} {:>8} {:>8} {}", + "ID", "SOURCE", "TARGET", "STATUS", "TRIGGERS", "ERRORS", "LAST TRIGGERED" + ); + println!("{}", "─".repeat(120)); + + for pipe in &pipes { + let target = pipe + .target_container + .as_deref() + .or(pipe.target_url.as_deref()) + .unwrap_or("-"); + let last = 
pipe + .last_triggered_at + .as_deref() + .unwrap_or("never"); + let status_icon = match pipe.status.as_str() { + "active" => "● active", + "paused" => "◉ paused", + "error" => "✗ error", + _ => "○ draft", + }; + + println!( + "{:<38} {:<15} {:<15} {:<10} {:>8} {:>8} {}", + &pipe.id, + truncate_str(&pipe.source_container, 14), + truncate_str(target, 14), + status_icon, + pipe.trigger_count, + pipe.error_count, + last, + ); + } + + println!("\n{} pipe(s) total.", pipes.len()); + Ok(()) + } +} + +fn truncate_str(s: &str, max: usize) -> String { + if s.len() <= max { + s.to_string() + } else { + format!("{}…", &s[..max - 1]) + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker pipe activate — activate a pipe instance +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +pub struct PipeActivateCommand { + pub pipe_id: String, + pub trigger: String, + pub poll_interval: u32, + pub json: bool, + pub deployment: Option, +} + +impl PipeActivateCommand { + pub fn new( + pipe_id: String, + trigger: String, + poll_interval: u32, + json: bool, + deployment: Option, + ) -> Self { + Self { pipe_id, trigger, poll_interval, json, deployment } + } +} + +impl CallableTrait for PipeActivateCommand { + fn call(&self) -> Result<(), Box> { + let ctx = CliRuntime::new("pipe activate")?; + let hash = resolve_deployment_hash(&self.deployment, &ctx)?; + + // Fetch pipe instance details to get source/target info + let pb = progress::spinner("Fetching pipe details..."); + let pipe = ctx.block_on(ctx.client.get_pipe_instance(&self.pipe_id)) + .map_err(|e| { progress::finish_error(&pb, "Failed"); e })? 
+ .ok_or_else(|| CliError::ConfigValidation( + format!("Pipe instance '{}' not found", self.pipe_id), + ))?; + progress::finish_success(&pb, "Pipe found"); + + // Get template info for endpoint details (if linked) + let (source_endpoint, source_method, target_endpoint, target_method, field_mapping) = + if let Some(ref tid) = pipe.template_id { + let templates = ctx.block_on(ctx.client.list_pipe_templates(None, None))?; + if let Some(tmpl) = templates.iter().find(|t| &t.id == tid) { + ( + tmpl.source_endpoint["path"].as_str().unwrap_or("/").to_string(), + tmpl.source_endpoint["method"].as_str().unwrap_or("GET").to_string(), + tmpl.target_endpoint["path"].as_str().unwrap_or("/").to_string(), + tmpl.target_endpoint["method"].as_str().unwrap_or("POST").to_string(), + pipe.field_mapping_override.clone().unwrap_or(tmpl.field_mapping.clone()), + ) + } else { + ("/".to_string(), "GET".to_string(), "/".to_string(), "POST".to_string(), serde_json::json!({})) + } + } else { + ("/".to_string(), "GET".to_string(), "/".to_string(), "POST".to_string(), + pipe.field_mapping_override.clone().unwrap_or(serde_json::json!({}))) + }; + + // 1. Update status to "active" via API + let pb = progress::spinner("Setting pipe status to active..."); + ctx.block_on(ctx.client.update_pipe_status(&self.pipe_id, "active")) + .map_err(|e| { progress::finish_error(&pb, "Status update failed"); e })?; + progress::finish_success(&pb, "Status: active"); + + // 2. 
Send activate_pipe command to agent + let params = serde_json::json!({ + "pipe_instance_id": self.pipe_id, + "source_container": pipe.source_container, + "source_endpoint": source_endpoint, + "source_method": source_method, + "target_container": pipe.target_container, + "target_url": pipe.target_url, + "target_endpoint": target_endpoint, + "target_method": target_method, + "field_mapping": field_mapping, + "trigger_type": self.trigger, + "poll_interval_secs": self.poll_interval, + }); + + let request = AgentEnqueueRequest::new(&hash, "activate_pipe") + .with_raw_parameters(params); + + let info = run_agent_command( + &ctx, + &request, + "Activating pipe on agent", + PROBE_TIMEOUT_SECS, + )?; + + print_command_result(&info, self.json); + + if !self.json && info.status == "completed" { + println!("\n ✓ Pipe '{}' is now active", self.pipe_id); + println!(" Trigger type: {}", self.trigger); + if self.trigger == "poll" { + println!(" Poll interval: {}s", self.poll_interval); + } + } + + Ok(()) + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker pipe deactivate — stop a pipe +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +pub struct PipeDeactivateCommand { + pub pipe_id: String, + pub json: bool, + pub deployment: Option, +} + +impl PipeDeactivateCommand { + pub fn new(pipe_id: String, json: bool, deployment: Option) -> Self { + Self { pipe_id, json, deployment } + } +} + +impl CallableTrait for PipeDeactivateCommand { + fn call(&self) -> Result<(), Box> { + let ctx = CliRuntime::new("pipe deactivate")?; + let hash = resolve_deployment_hash(&self.deployment, &ctx)?; + + // 1. Update status to "paused" via API + let pb = progress::spinner("Setting pipe status to paused..."); + ctx.block_on(ctx.client.update_pipe_status(&self.pipe_id, "paused")) + .map_err(|e| { progress::finish_error(&pb, "Status update failed"); e })?; + progress::finish_success(&pb, "Status: paused"); + + // 2. 
Send deactivate_pipe command to agent + let params = serde_json::json!({ + "pipe_instance_id": self.pipe_id, + }); + + let request = AgentEnqueueRequest::new(&hash, "deactivate_pipe") + .with_raw_parameters(params); + + let info = run_agent_command( + &ctx, + &request, + "Deactivating pipe on agent", + PROBE_TIMEOUT_SECS, + )?; + + print_command_result(&info, self.json); + + if !self.json && info.status == "completed" { + println!("\n ✓ Pipe '{}' deactivated", self.pipe_id); + } + + Ok(()) + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker pipe trigger — one-shot pipe execution +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +pub struct PipeTriggerCommand { + pub pipe_id: String, + pub data: Option, + pub json: bool, + pub deployment: Option, +} + +impl PipeTriggerCommand { + pub fn new(pipe_id: String, data: Option, json: bool, deployment: Option) -> Self { + Self { pipe_id, data, json, deployment } + } +} + +impl CallableTrait for PipeTriggerCommand { + fn call(&self) -> Result<(), Box> { + let ctx = CliRuntime::new("pipe trigger")?; + let hash = resolve_deployment_hash(&self.deployment, &ctx)?; + + let input_data = match &self.data { + Some(raw) => { + let parsed: serde_json::Value = serde_json::from_str(raw) + .map_err(|e| CliError::ConfigValidation(format!("Invalid JSON data: {}", e)))?; + Some(parsed) + } + None => None, + }; + + let params = serde_json::json!({ + "pipe_instance_id": self.pipe_id, + "input_data": input_data, + }); + + let request = AgentEnqueueRequest::new(&hash, "trigger_pipe") + .with_raw_parameters(params); + + let info = run_agent_command( + &ctx, + &request, + "Triggering pipe", + PROBE_TIMEOUT_SECS, + )?; + + print_command_result(&info, self.json); + + if !self.json { + if info.status == "completed" { + if let Some(ref result) = info.result { + let success = result["success"].as_bool().unwrap_or(false); + if success { + println!("\n ✓ Pipe '{}' triggered successfully", self.pipe_id); + } else { + 
let error = result["error"].as_str().unwrap_or("unknown error"); + eprintln!("\n ✗ Pipe trigger failed: {}", error); + } + } + } + } + Ok(()) } } diff --git a/src/db/cloud.rs b/src/db/cloud.rs index 8e60c674..3137b2e7 100644 --- a/src/db/cloud.rs +++ b/src/db/cloud.rs @@ -45,16 +45,17 @@ pub async fn fetch_by_user(pool: &PgPool, user_id: &str) -> Result Result { let query_span = tracing::info_span!("Saving user's cloud data into the database"); - // If no name provided, we'll generate a default after insert (need the ID) + // If no name provided, generate a unique default using a UUID suffix to + // avoid collisions on the (user_id, name) unique constraint. let has_name = !cloud.name.is_empty(); let insert_name = if has_name { cloud.name.clone() } else { - // Temporary placeholder; will be updated below - format!("{}-0", cloud.provider) + let suffix = uuid::Uuid::new_v4().to_string(); + format!("{}-{}", cloud.provider, &suffix[..8]) }; - sqlx::query!( + let result = sqlx::query!( r#" INSERT INTO cloud ( user_id, @@ -81,21 +82,38 @@ pub async fn insert(pool: &PgPool, mut cloud: models::Cloud) -> Result Result { @@ -140,13 +158,14 @@ pub async fn update(pool: &PgPool, mut cloud: models::Cloud) -> Result Result { +pub async fn delete(pool: &PgPool, id: i32, user_id: &str) -> Result { tracing::info!("Delete cloud {}", id); - sqlx::query::("DELETE FROM cloud WHERE id = $1;") + sqlx::query::("DELETE FROM cloud WHERE id = $1 AND user_id = $2;") .bind(id) + .bind(user_id) .execute(pool) .await - .map(|_| true) + .map(|r| r.rows_affected() > 0) .map_err(|err| { tracing::error!("Failed to delete cloud: {:?}", err); "Failed to delete cloud".to_string() diff --git a/src/db/pipe.rs b/src/db/pipe.rs index 8f43edc6..07167654 100644 --- a/src/db/pipe.rs +++ b/src/db/pipe.rs @@ -99,6 +99,64 @@ pub async fn get_template_by_name( }) } +/// List pipe templates visible to a specific user (own templates + public templates) +#[tracing::instrument(name = "List pipe templates for user", 
skip(pool))] +pub async fn list_templates_for_user( + pool: &PgPool, + user_id: &str, + source_app_type: Option<&str>, + target_app_type: Option<&str>, + public_only: bool, +) -> Result, String> { + let query_span = tracing::info_span!("Listing pipe templates for user"); + + let mut sql = String::from( + r#" + SELECT id, name, description, source_app_type, source_endpoint, + target_app_type, target_endpoint, target_external_url, + field_mapping, config, is_public, created_by, created_at, updated_at + FROM pipe_templates + WHERE (created_by = $1 OR is_public = true) + "#, + ); + + let mut param_idx = 2; + if source_app_type.is_some() { + sql.push_str(&format!(" AND source_app_type = ${}", param_idx)); + param_idx += 1; + } + if target_app_type.is_some() { + sql.push_str(&format!(" AND target_app_type = ${}", param_idx)); + param_idx += 1; + } + if public_only { + sql.push_str(&format!(" AND is_public = ${}", param_idx)); + } + sql.push_str(" ORDER BY created_at DESC"); + + let mut query = sqlx::query_as::<_, PipeTemplate>(&sql); + query = query.bind(user_id.to_string()); + + if let Some(source) = source_app_type { + query = query.bind(source.to_string()); + } + if let Some(target) = target_app_type { + query = query.bind(target.to_string()); + } + if public_only { + query = query.bind(true); + } + + query + .fetch_all(pool) + .instrument(query_span) + .await + .map_err(|err| { + tracing::error!("Failed to list pipe templates for user: {:?}", err); + format!("Failed to list pipe templates: {}", err) + }) +} + /// List pipe templates with optional filters #[tracing::instrument(name = "List pipe templates", skip(pool))] pub async fn list_templates( diff --git a/src/db/project.rs b/src/db/project.rs index a2c57f6a..c8425681 100644 --- a/src/db/project.rs +++ b/src/db/project.rs @@ -51,6 +51,7 @@ pub async fn fetch_by_user(pool: &PgPool, user_id: &str) -> Result Result, String> { let query_span = tracing::info_span!("Fetch one project by name."); sqlx::query_as!( @@ 
-59,10 +60,11 @@ pub async fn fetch_one_by_name( SELECT * FROM project - WHERE name=$1 + WHERE name=$1 AND user_id=$2 LIMIT 1 "#, - name + name, + user_id ) .fetch_one(pool) .instrument(query_span) @@ -150,13 +152,14 @@ pub async fn update( } #[tracing::instrument(name = "Delete user's project.")] -pub async fn delete(pool: &PgPool, id: i32) -> Result { +pub async fn delete(pool: &PgPool, id: i32, user_id: &str) -> Result { tracing::info!("Delete project {}", id); - sqlx::query::("DELETE FROM project WHERE id = $1;") + sqlx::query::("DELETE FROM project WHERE id = $1 AND user_id = $2;") .bind(id) + .bind(user_id) .execute(pool) .await - .map(|_| true) + .map(|r| r.rows_affected() > 0) .map_err(|err| { tracing::error!("Failed to delete project: {:?}", err); "Failed to delete project".to_string() diff --git a/src/db/server.rs b/src/db/server.rs index 2a64a48a..83208faf 100644 --- a/src/db/server.rs +++ b/src/db/server.rs @@ -325,13 +325,14 @@ pub async fn update_srv_ip( } #[tracing::instrument(name = "Delete user's server.")] -pub async fn delete(pool: &PgPool, id: i32) -> Result { +pub async fn delete(pool: &PgPool, id: i32, user_id: &str) -> Result { tracing::info!("Delete server {}", id); - sqlx::query::("DELETE FROM server WHERE id = $1;") + sqlx::query::("DELETE FROM server WHERE id = $1 AND user_id = $2;") .bind(id) + .bind(user_id) .execute(pool) .await - .map(|_| true) + .map(|r| r.rows_affected() > 0) .map_err(|err| { tracing::error!("Failed to delete server: {:?}", err); "Failed to delete server".to_string() diff --git a/src/forms/cloud.rs b/src/forms/cloud.rs index 9df93c8f..0aece97b 100644 --- a/src/forms/cloud.rs +++ b/src/forms/cloud.rs @@ -27,7 +27,7 @@ pub struct CloudForm { } impl CloudForm { - #[tracing::instrument(name = "impl CloudForm::decode()")] + #[tracing::instrument(name = "impl CloudForm::decode()", skip_all)] pub(crate) fn decode(secret: &mut Secret, encrypted_value: String) -> String { // tracing::error!("encrypted_value {:?}", 
&encrypted_value); let b64_decoded = Secret::b64_decode(&encrypted_value).unwrap(); @@ -61,7 +61,7 @@ impl CloudForm { } // @todo should be refactored, may be moved to cloud.into() or Secret::from() - #[tracing::instrument(name = "decode_model")] + #[tracing::instrument(name = "decode_model", skip_all)] pub fn decode_model(mut cloud: models::Cloud, reveal: bool) -> models::Cloud { let mut secret = Secret::new(); secret.user_id = cloud.user_id.clone(); @@ -87,28 +87,16 @@ impl CloudForm { impl std::fmt::Debug for CloudForm { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let cloud_key: String = match self.cloud_key.as_ref() { - Some(val) => val.chars().take(4).collect::() + "****", - None => "".to_string(), - }; - let cloud_token: String = match self.cloud_token.as_ref() { - Some(val) => { - eprintln!("cloud token {val:?}"); - val.chars().take(4).collect::() + "****" - } - None => "".to_string(), - }; - - let cloud_secret: String = match self.cloud_secret.as_ref() { - Some(val) => val.chars().take(4).collect::() + "****", - None => "".to_string(), - }; - - write!( - f, - "{} cloud creds: cloud_key : {} cloud_token: {} cloud_secret: {} project_id: {:?}", - self.provider, cloud_key, cloud_token, cloud_secret, self.project_id - ) + f.debug_struct("CloudForm") + .field("user_id", &self.user_id) + .field("project_id", &self.project_id) + .field("name", &self.name) + .field("provider", &self.provider) + .field("cloud_token", &"[REDACTED]") + .field("cloud_key", &"[REDACTED]") + .field("cloud_secret", &"[REDACTED]") + .field("save_token", &self.save_token) + .finish() } } @@ -129,7 +117,7 @@ fn encrypt_field(secret: &mut Secret, field_name: &str, value: Option) - } impl Into for &CloudForm { - #[tracing::instrument(name = "impl Into for &CloudForm")] + #[tracing::instrument(name = "impl Into for &CloudForm", skip_all)] fn into(self) -> models::Cloud { let mut cloud = models::Cloud::default(); cloud.provider = self.provider.clone(); @@ -160,7 +148,7 
@@ impl Into for &CloudForm { // on deploy impl Into for models::Cloud { - #[tracing::instrument(name = "Into for models::Cloud .")] + #[tracing::instrument(name = "Into for models::Cloud .", skip_all)] fn into(self) -> CloudForm { let mut form = CloudForm::default(); form.provider = self.provider.clone(); diff --git a/src/forms/project/app.rs b/src/forms/project/app.rs index 63ff2168..6387666f 100644 --- a/src/forms/project/app.rs +++ b/src/forms/project/app.rs @@ -75,7 +75,7 @@ pub struct App { } impl App { - #[tracing::instrument(name = "named_volumes")] + #[tracing::instrument(name = "named_volumes", skip_all)] pub fn named_volumes(&self) -> IndexMap> { let mut named_volumes = IndexMap::default(); diff --git a/src/forms/project/deploy.rs b/src/forms/project/deploy.rs index 203d40fc..88c7a712 100644 --- a/src/forms/project/deploy.rs +++ b/src/forms/project/deploy.rs @@ -5,7 +5,7 @@ use serde_json::Value; use serde_valid::Validate; /// Docker registry credentials for pulling private images during deployment. 
-#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Default, Clone, PartialEq, Serialize, Deserialize)] pub struct RegistryForm { #[serde(skip_serializing_if = "Option::is_none")] pub docker_username: Option, @@ -15,6 +15,16 @@ pub struct RegistryForm { pub docker_registry: Option, } +impl std::fmt::Debug for RegistryForm { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RegistryForm") + .field("docker_username", &self.docker_username) + .field("docker_password", &"[REDACTED]") + .field("docker_registry", &self.docker_registry) + .finish() + } +} + /// Validates that cloud deployments have required instance configuration fn validate_cloud_instance_config(deploy: &Deploy) -> Result<(), serde_valid::validation::Error> { // Skip validation for "own" server deployments diff --git a/src/forms/project/docker_image.rs b/src/forms/project/docker_image.rs index c2cb7c36..91817074 100644 --- a/src/forms/project/docker_image.rs +++ b/src/forms/project/docker_image.rs @@ -54,7 +54,7 @@ impl fmt::Display for DockerImage { } impl DockerImage { - #[tracing::instrument(name = "is_active")] + #[tracing::instrument(name = "is_active", skip_all)] pub async fn is_active(&self) -> Result { DockerHub::try_from(self)?.is_active().await } diff --git a/src/forms/server.rs b/src/forms/server.rs index b4367972..bae896e4 100644 --- a/src/forms/server.rs +++ b/src/forms/server.rs @@ -3,7 +3,7 @@ use chrono::Utc; use serde::{Deserialize, Serialize}; use serde_valid::Validate; -#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize, Validate)] +#[derive(Default, Clone, PartialEq, Serialize, Deserialize, Validate)] pub struct ServerForm { /// If provided, update this existing server instead of creating new pub server_id: Option, @@ -38,6 +38,28 @@ pub struct ServerForm { pub ssh_private_key: Option, } +impl std::fmt::Debug for ServerForm { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + 
f.debug_struct("ServerForm") + .field("server_id", &self.server_id) + .field("cloud_id", &self.cloud_id) + .field("region", &self.region) + .field("zone", &self.zone) + .field("server", &self.server) + .field("os", &self.os) + .field("disk_type", &self.disk_type) + .field("srv_ip", &self.srv_ip) + .field("ssh_port", &self.ssh_port) + .field("ssh_user", &self.ssh_user) + .field("name", &self.name) + .field("connection_mode", &self.connection_mode) + .field("vault_key_path", &self.vault_key_path) + .field("public_key", &"[REDACTED]") + .field("ssh_private_key", &"[REDACTED]") + .finish() + } +} + pub fn default_ssh_port() -> Option { Some(22) } diff --git a/src/forms/status_panel.rs b/src/forms/status_panel.rs index ee252052..96d049bf 100644 --- a/src/forms/status_panel.rs +++ b/src/forms/status_panel.rs @@ -563,6 +563,62 @@ pub fn validate_command_parameters( .map(Some) .map_err(|err| format!("Failed to encode check_connections parameters: {}", err)) } + "activate_pipe" => { + let value = parameters.clone().ok_or_else(|| "activate_pipe requires parameters".to_string())?; + let params: ActivatePipeCommandRequest = serde_json::from_value(value) + .map_err(|err| format!("Invalid activate_pipe parameters: {}", err))?; + + // Validate pipe_instance_id is non-empty + if params.pipe_instance_id.trim().is_empty() { + return Err("activate_pipe: pipe_instance_id is required".to_string()); + } + // Validate target: at least one of target_container or target_url + if params.target_container.is_none() && params.target_url.is_none() { + return Err("activate_pipe: either target_container or target_url is required".to_string()); + } + // Validate trigger_type + let valid_triggers = ["webhook", "poll", "manual"]; + if !valid_triggers.contains(¶ms.trigger_type.as_str()) { + return Err(format!( + "activate_pipe: trigger_type must be one of: {}; got '{}'", + valid_triggers.join(", "), params.trigger_type + )); + } + // Validate poll_interval for poll trigger + if params.trigger_type == 
"poll" && (params.poll_interval_secs < 10 || params.poll_interval_secs > 86400) { + return Err("activate_pipe: poll_interval_secs must be between 10 and 86400".to_string()); + } + + serde_json::to_value(params) + .map(Some) + .map_err(|err| format!("Failed to encode activate_pipe parameters: {}", err)) + } + "deactivate_pipe" => { + let value = parameters.clone().ok_or_else(|| "deactivate_pipe requires parameters".to_string())?; + let params: DeactivatePipeCommandRequest = serde_json::from_value(value) + .map_err(|err| format!("Invalid deactivate_pipe parameters: {}", err))?; + + if params.pipe_instance_id.trim().is_empty() { + return Err("deactivate_pipe: pipe_instance_id is required".to_string()); + } + + serde_json::to_value(params) + .map(Some) + .map_err(|err| format!("Failed to encode deactivate_pipe parameters: {}", err)) + } + "trigger_pipe" => { + let value = parameters.clone().ok_or_else(|| "trigger_pipe requires parameters".to_string())?; + let params: TriggerPipeCommandRequest = serde_json::from_value(value) + .map_err(|err| format!("Invalid trigger_pipe parameters: {}", err))?; + + if params.pipe_instance_id.trim().is_empty() { + return Err("trigger_pipe: pipe_instance_id is required".to_string()); + } + + serde_json::to_value(params) + .map(Some) + .map_err(|err| format!("Failed to encode trigger_pipe parameters: {}", err)) + } _ => Ok(parameters.clone()), } } @@ -672,6 +728,57 @@ pub fn validate_command_result( .map(Some) .map_err(|err| format!("Failed to encode probe_endpoints result: {}", err)) } + "activate_pipe" => { + let value = result.clone() + .ok_or_else(|| "activate_pipe result payload is required".to_string())?; + let report: ActivatePipeCommandReport = serde_json::from_value(value) + .map_err(|err| format!("Invalid activate_pipe result: {}", err))?; + + if report.command_type != "activate_pipe" { + return Err("activate_pipe result must include type='activate_pipe'".to_string()); + } + if report.deployment_hash != deployment_hash { + 
return Err("activate_pipe result deployment_hash mismatch".to_string()); + } + + serde_json::to_value(report) + .map(Some) + .map_err(|err| format!("Failed to encode activate_pipe result: {}", err)) + } + "deactivate_pipe" => { + let value = result.clone() + .ok_or_else(|| "deactivate_pipe result payload is required".to_string())?; + let report: DeactivatePipeCommandReport = serde_json::from_value(value) + .map_err(|err| format!("Invalid deactivate_pipe result: {}", err))?; + + if report.command_type != "deactivate_pipe" { + return Err("deactivate_pipe result must include type='deactivate_pipe'".to_string()); + } + if report.deployment_hash != deployment_hash { + return Err("deactivate_pipe result deployment_hash mismatch".to_string()); + } + + serde_json::to_value(report) + .map(Some) + .map_err(|err| format!("Failed to encode deactivate_pipe result: {}", err)) + } + "trigger_pipe" => { + let value = result.clone() + .ok_or_else(|| "trigger_pipe result payload is required".to_string())?; + let report: TriggerPipeCommandReport = serde_json::from_value(value) + .map_err(|err| format!("Invalid trigger_pipe result: {}", err))?; + + if report.command_type != "trigger_pipe" { + return Err("trigger_pipe result must include type='trigger_pipe'".to_string()); + } + if report.deployment_hash != deployment_hash { + return Err("trigger_pipe result deployment_hash mismatch".to_string()); + } + + serde_json::to_value(report) + .map(Some) + .map_err(|err| format!("Failed to encode trigger_pipe result: {}", err)) + } _ => Ok(result.clone()), } } @@ -758,6 +865,114 @@ pub struct ProbeEndpointsCommandReport { pub probed_at: String, } +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Pipe: activate_pipe / deactivate_pipe / trigger_pipe +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Request to activate a pipe instance on the agent +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ActivatePipeCommandRequest { + /// UUID of the pipe instance to 
activate + pub pipe_instance_id: String, + /// Source container name + pub source_container: String, + /// Source endpoint path to watch + pub source_endpoint: String, + /// Source HTTP method (GET, POST, etc.) + #[serde(default = "default_source_method")] + pub source_method: String, + /// Target container name (for internal pipes) + #[serde(default)] + pub target_container: Option, + /// Target external URL (for external pipes) + #[serde(default)] + pub target_url: Option, + /// Target endpoint path + pub target_endpoint: String, + /// Target HTTP method + #[serde(default = "default_target_method")] + pub target_method: String, + /// Field mapping (JSONPath expressions) + pub field_mapping: serde_json::Value, + /// Trigger type: "webhook", "poll", "manual" + #[serde(default = "default_trigger_type")] + pub trigger_type: String, + /// Poll interval in seconds (only for trigger_type=poll) + #[serde(default = "default_poll_interval")] + pub poll_interval_secs: u32, +} + +fn default_source_method() -> String { "GET".to_string() } +fn default_target_method() -> String { "POST".to_string() } +fn default_trigger_type() -> String { "webhook".to_string() } +fn default_poll_interval() -> u32 { 300 } + +/// Request to deactivate a pipe instance on the agent +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct DeactivatePipeCommandRequest { + /// UUID of the pipe instance to deactivate + pub pipe_instance_id: String, +} + +/// Request to trigger a pipe instance manually (one-shot execution) +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct TriggerPipeCommandRequest { + /// UUID of the pipe instance to trigger + pub pipe_instance_id: String, + /// Optional input data to feed into the pipe (overrides source fetch) + #[serde(default)] + pub input_data: Option, +} + +/// Result of a pipe activation +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct ActivatePipeCommandReport { + #[serde(rename = "type")] + pub command_type: String, + pub 
deployment_hash: String, + pub pipe_instance_id: String, + pub status: String, + pub trigger_type: String, + /// Agent-assigned listener ID (for webhook type) or schedule ID (for poll type) + #[serde(default)] + pub listener_id: Option, + pub activated_at: String, +} + +/// Result of a pipe deactivation +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct DeactivatePipeCommandReport { + #[serde(rename = "type")] + pub command_type: String, + pub deployment_hash: String, + pub pipe_instance_id: String, + pub status: String, + pub deactivated_at: String, +} + +/// Result of a pipe trigger (one-shot execution) +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct TriggerPipeCommandReport { + #[serde(rename = "type")] + pub command_type: String, + pub deployment_hash: String, + pub pipe_instance_id: String, + pub success: bool, + /// Data read from source + #[serde(default)] + pub source_data: Option, + /// Transformed data sent to target + #[serde(default)] + pub mapped_data: Option, + /// Response from target + #[serde(default)] + pub target_response: Option, + /// Error message if failed + #[serde(default)] + pub error: Option, + pub triggered_at: String, +} + #[cfg(test)] mod tests { use super::*; @@ -1158,4 +1373,126 @@ mod tests { assert!(result.is_err()); assert!(result.unwrap_err().contains("runtime must be one of")); } + + #[test] + fn activate_pipe_requires_parameters() { + let err = validate_command_parameters("activate_pipe", &None); + assert!(err.is_err()); + } + + #[test] + fn activate_pipe_validates_trigger_type() { + let err = validate_command_parameters( + "activate_pipe", + &Some(json!({ + "pipe_instance_id": "abc-123", + "source_container": "wordpress_1", + "source_endpoint": "/wp-json/wp/v2/posts", + "target_container": "n8n_1", + "target_endpoint": "/webhook/pipe", + "field_mapping": {"title": "$.title"}, + "trigger_type": "invalid" + })), + ); + assert!(err.is_err()); + assert!(err.unwrap_err().contains("trigger_type")); + } + + 
#[test] + fn activate_pipe_validates_target_required() { + let err = validate_command_parameters( + "activate_pipe", + &Some(json!({ + "pipe_instance_id": "abc-123", + "source_container": "wordpress_1", + "source_endpoint": "/wp-json/wp/v2/posts", + "target_endpoint": "/webhook/pipe", + "field_mapping": {"title": "$.title"}, + "trigger_type": "webhook" + })), + ); + assert!(err.is_err()); + assert!(err.unwrap_err().contains("target_container")); + } + + #[test] + fn activate_pipe_accepts_valid_params() { + let result = validate_command_parameters( + "activate_pipe", + &Some(json!({ + "pipe_instance_id": "abc-123", + "source_container": "wordpress_1", + "source_endpoint": "/wp-json/wp/v2/posts", + "target_container": "n8n_1", + "target_endpoint": "/webhook/pipe", + "field_mapping": {"title": "$.title"}, + "trigger_type": "webhook" + })), + ); + assert!(result.is_ok()); + } + + #[test] + fn trigger_pipe_requires_instance_id() { + let err = validate_command_parameters( + "trigger_pipe", + &Some(json!({ "pipe_instance_id": "" })), + ); + assert!(err.is_err()); + } + + #[test] + fn trigger_pipe_accepts_valid_params() { + let result = validate_command_parameters( + "trigger_pipe", + &Some(json!({ + "pipe_instance_id": "abc-123" + })), + ); + assert!(result.is_ok()); + } + + #[test] + fn deactivate_pipe_accepts_valid_params() { + let result = validate_command_parameters( + "deactivate_pipe", + &Some(json!({ + "pipe_instance_id": "abc-123" + })), + ); + assert!(result.is_ok()); + } + + #[test] + fn activate_pipe_result_validates() { + let result = validate_command_result( + "activate_pipe", + "deploy-hash", + &Some(json!({ + "type": "activate_pipe", + "deployment_hash": "deploy-hash", + "pipe_instance_id": "abc-123", + "status": "active", + "trigger_type": "webhook", + "activated_at": "2026-01-01T00:00:00Z" + })), + ); + assert!(result.is_ok()); + } + + #[test] + fn trigger_pipe_result_validates() { + let result = validate_command_result( + "trigger_pipe", + "deploy-hash", 
+ &Some(json!({ + "type": "trigger_pipe", + "deployment_hash": "deploy-hash", + "pipe_instance_id": "abc-123", + "success": true, + "triggered_at": "2026-01-01T00:00:00Z" + })), + ); + assert!(result.is_ok()); + } } diff --git a/src/helpers/vault.rs b/src/helpers/vault.rs index 1dcdb127..5145817c 100644 --- a/src/helpers/vault.rs +++ b/src/helpers/vault.rs @@ -2,7 +2,6 @@ use crate::configuration::VaultSettings; use reqwest::Client; use serde_json::json; -#[derive(Debug)] pub struct VaultClient { client: Client, address: String, @@ -12,6 +11,18 @@ pub struct VaultClient { ssh_key_path_prefix: String, } +impl std::fmt::Debug for VaultClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("VaultClient") + .field("address", &self.address) + .field("token", &"[REDACTED]") + .field("agent_path_prefix", &self.agent_path_prefix) + .field("api_prefix", &self.api_prefix) + .field("ssh_key_path_prefix", &self.ssh_key_path_prefix) + .finish() + } +} + impl VaultClient { pub fn new(settings: &VaultSettings) -> Self { Self { @@ -28,7 +39,7 @@ impl VaultClient { } /// Store agent token in Vault at agent/{deployment_hash}/token - #[tracing::instrument(name = "Store agent token in Vault", skip(self, token))] + #[tracing::instrument(name = "Store agent token in Vault", skip_all)] pub async fn store_agent_token( &self, deployment_hash: &str, @@ -77,7 +88,7 @@ impl VaultClient { } /// Fetch agent token from Vault - #[tracing::instrument(name = "Fetch agent token from Vault", skip(self))] + #[tracing::instrument(name = "Fetch agent token from Vault", skip_all)] pub async fn fetch_agent_token(&self, deployment_hash: &str) -> Result { let base = self.address.trim_end_matches('/'); let prefix = self.agent_path_prefix.trim_matches('/'); @@ -129,7 +140,7 @@ impl VaultClient { } /// Delete agent token from Vault - #[tracing::instrument(name = "Delete agent token from Vault", skip(self))] + #[tracing::instrument(name = "Delete agent token from 
Vault", skip_all)] pub async fn delete_agent_token(&self, deployment_hash: &str) -> Result<(), String> { let base = self.address.trim_end_matches('/'); let prefix = self.agent_path_prefix.trim_matches('/'); @@ -169,7 +180,7 @@ impl VaultClient { /// Store runtime preference for a deployment /// Path: {api_prefix}/{agent_prefix}/{deployment_hash}/runtime - #[tracing::instrument(name = "Store runtime preference in Vault", skip(self))] + #[tracing::instrument(name = "Store runtime preference in Vault", skip_all)] pub async fn store_runtime_preference( &self, deployment_hash: &str, @@ -220,7 +231,7 @@ impl VaultClient { /// Fetch runtime preference from Vault /// Returns None if not set - #[tracing::instrument(name = "Fetch runtime preference from Vault", skip(self))] + #[tracing::instrument(name = "Fetch runtime preference from Vault", skip_all)] pub async fn fetch_runtime_preference( &self, deployment_hash: &str, @@ -275,7 +286,7 @@ impl VaultClient { } /// Delete runtime preference from Vault - #[tracing::instrument(name = "Delete runtime preference from Vault", skip(self))] + #[tracing::instrument(name = "Delete runtime preference from Vault", skip_all)] pub async fn delete_runtime_preference( &self, deployment_hash: &str, @@ -319,7 +330,7 @@ impl VaultClient { /// Fetch org-level runtime policy from Vault /// Path: {api_prefix}/{agent_prefix}/org/{org_id}/runtime_policy /// Returns the required runtime if an org policy exists, None otherwise - #[tracing::instrument(name = "Fetch org runtime policy from Vault", skip(self))] + #[tracing::instrument(name = "Fetch org runtime policy from Vault", skip_all)] pub async fn fetch_org_runtime_policy( &self, org_id: &str, @@ -420,7 +431,7 @@ impl VaultClient { } /// Store SSH keypair in Vault at users/{user_id}/ssh_keys/{server_id} - #[tracing::instrument(name = "Store SSH key in Vault", skip(self, private_key))] + #[tracing::instrument(name = "Store SSH key in Vault", skip_all)] pub async fn store_ssh_key( &self, user_id: 
&str, @@ -471,7 +482,7 @@ impl VaultClient { } /// Fetch SSH private key from Vault - #[tracing::instrument(name = "Fetch SSH key from Vault", skip(self))] + #[tracing::instrument(name = "Fetch SSH key from Vault", skip_all)] pub async fn fetch_ssh_key(&self, user_id: &str, server_id: i32) -> Result { let path = self.ssh_key_path(user_id, server_id); @@ -513,7 +524,7 @@ impl VaultClient { } /// Fetch SSH public key from Vault - #[tracing::instrument(name = "Fetch SSH public key from Vault", skip(self))] + #[tracing::instrument(name = "Fetch SSH public key from Vault", skip_all)] pub async fn fetch_ssh_public_key( &self, user_id: &str, @@ -559,7 +570,7 @@ impl VaultClient { } /// Delete SSH key from Vault (disconnect) - #[tracing::instrument(name = "Delete SSH key from Vault", skip(self))] + #[tracing::instrument(name = "Delete SSH key from Vault", skip_all)] pub async fn delete_ssh_key(&self, user_id: &str, server_id: i32) -> Result<(), String> { let path = self.ssh_key_path(user_id, server_id); diff --git a/src/mcp/tools/cloud.rs b/src/mcp/tools/cloud.rs index 32c12673..975e9b05 100644 --- a/src/mcp/tools/cloud.rs +++ b/src/mcp/tools/cloud.rs @@ -169,7 +169,7 @@ impl ToolHandler for DeleteCloudTool { .map_err(|e| format!("Cloud error: {}", e))? 
.ok_or_else(|| "Cloud not found".to_string())?; - db::cloud::delete(&context.pg_pool, args.id) + db::cloud::delete(&context.pg_pool, args.id, &context.user.id) .await .map_err(|e| format!("Failed to delete cloud: {}", e))?; diff --git a/src/mcp/tools/compose.rs b/src/mcp/tools/compose.rs index 75752438..5a196d58 100644 --- a/src/mcp/tools/compose.rs +++ b/src/mcp/tools/compose.rs @@ -30,7 +30,7 @@ impl ToolHandler for DeleteProjectTool { return Err("Unauthorized: You do not own this project".to_string()); } - db::project::delete(&context.pg_pool, args.project_id) + db::project::delete(&context.pg_pool, args.project_id, &context.user.id) .await .map_err(|e| format!("Failed to delete project: {}", e))?; diff --git a/src/models/cloud.rs b/src/models/cloud.rs index 8916c689..e3c65414 100644 --- a/src/models/cloud.rs +++ b/src/models/cloud.rs @@ -1,7 +1,7 @@ use chrono::{DateTime, Utc}; use serde_derive::{Deserialize, Serialize}; -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct Cloud { pub id: i32, pub user_id: String, @@ -15,6 +15,23 @@ pub struct Cloud { pub updated_at: DateTime, } +impl std::fmt::Debug for Cloud { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Cloud") + .field("id", &self.id) + .field("user_id", &self.user_id) + .field("name", &self.name) + .field("provider", &self.provider) + .field("cloud_token", &"[REDACTED]") + .field("cloud_key", &"[REDACTED]") + .field("cloud_secret", &"[REDACTED]") + .field("save_token", &self.save_token) + .field("created_at", &self.created_at) + .field("updated_at", &self.updated_at) + .finish() + } +} + fn mask_string(s: Option<&String>) -> String { match s { Some(val) => val.chars().take(4).collect::() + "****", diff --git a/src/models/user.rs b/src/models/user.rs index 2cb87951..c5510112 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -1,6 +1,6 @@ use serde::Deserialize; -#[derive(Debug, 
Deserialize, Clone)] +#[derive(Deserialize, Clone)] pub struct User { pub id: String, pub first_name: String, @@ -21,3 +21,17 @@ impl User { self } } + +impl std::fmt::Debug for User { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("User") + .field("id", &self.id) + .field("first_name", &self.first_name) + .field("last_name", &self.last_name) + .field("email", &self.email) + .field("role", &self.role) + .field("email_confirmed", &self.email_confirmed) + .field("access_token", &"[REDACTED]") + .finish() + } +} diff --git a/src/routes/agent/audit.rs b/src/routes/agent/audit.rs index 78225c9e..da4807ca 100644 --- a/src/routes/agent/audit.rs +++ b/src/routes/agent/audit.rs @@ -17,7 +17,7 @@ pub struct IngestResponse { /// /// Auth: `X-Internal-Key` header must match the `INTERNAL_SERVICES_ACCESS_KEY` /// environment variable. -#[tracing::instrument(name = "Agent audit ingest", skip(pool, req, body))] +#[tracing::instrument(name = "Agent audit ingest", skip_all)] #[post("/audit")] pub async fn agent_audit_ingest_handler( req: HttpRequest, @@ -63,7 +63,7 @@ pub struct AuditQueryParams { /// Query the audit log. /// /// Auth: standard JWT or OAuth2 user auth (handled by middleware). 
-#[tracing::instrument(name = "Agent audit query", skip(pool))] +#[tracing::instrument(name = "Agent audit query", skip_all)] #[get("/audit")] pub async fn agent_audit_query_handler( params: web::Query, diff --git a/src/routes/agent/enqueue.rs b/src/routes/agent/enqueue.rs index 5a2df6dd..8c2262b3 100644 --- a/src/routes/agent/enqueue.rs +++ b/src/routes/agent/enqueue.rs @@ -18,7 +18,7 @@ pub struct EnqueueRequest { pub timeout_seconds: Option, } -#[tracing::instrument(name = "Agent enqueue command", skip(agent_pool, user))] +#[tracing::instrument(name = "Agent enqueue command", skip_all)] #[post("/commands/enqueue")] pub async fn enqueue_handler( user: web::ReqData>, @@ -33,6 +33,21 @@ pub async fn enqueue_handler( return Err(JsonResponse::<()>::build().bad_request("command_type is required")); } + // Verify deployment belongs to the requesting user + let deployment = db::deployment::fetch_by_deployment_hash( + agent_pool.as_ref(), + &payload.deployment_hash, + ) + .await + .map_err(|err| JsonResponse::<()>::build().internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::<()>::build().not_found("Deployment not found")); + } + } + // Validate parameters let validated_parameters = status_panel::validate_command_parameters(&payload.command_type, &payload.parameters) diff --git a/src/routes/agent/link.rs b/src/routes/agent/link.rs index 7f42322e..9a116757 100644 --- a/src/routes/agent/link.rs +++ b/src/routes/agent/link.rs @@ -38,7 +38,7 @@ fn generate_agent_token() -> String { /// The session_token proves the user authenticated via /api/v1/agent/login. /// Stacker validates token ownership, checks the user owns the deployment, /// then creates or returns an agent with credentials. 
-#[tracing::instrument(name = "Link agent to deployment", skip(agent_pool, vault_client, user_service, req))] +#[tracing::instrument(name = "Link agent to deployment", skip_all)] #[post("/link")] pub async fn link_handler( payload: web::Json, diff --git a/src/routes/agent/login.rs b/src/routes/agent/login.rs index 4805fc01..93346c31 100644 --- a/src/routes/agent/login.rs +++ b/src/routes/agent/login.rs @@ -5,12 +5,21 @@ use actix_web::{post, web, HttpRequest, HttpResponse, Result}; use serde::{Deserialize, Serialize}; use std::sync::Arc; -#[derive(Debug, Deserialize)] +#[derive(Deserialize)] pub struct AgentLoginRequest { pub email: String, pub password: String, } +impl std::fmt::Debug for AgentLoginRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("AgentLoginRequest") + .field("email", &self.email) + .field("password", &"[REDACTED]") + .finish() + } +} + #[derive(Debug, Serialize)] pub struct DeploymentInfo { pub deployment_id: String, @@ -32,7 +41,7 @@ pub struct AgentLoginResponse { /// Proxy login for Status Panel agents. Authenticates the user against /// the TryDirect OAuth server, then returns a session token and the /// user's deployments so the agent can pick one to link to. 
-#[tracing::instrument(name = "Agent proxy login", skip(settings, api_pool, user_service, _req))] +#[tracing::instrument(name = "Agent proxy login", skip_all)] #[post("/login")] pub async fn login_handler( payload: web::Json, diff --git a/src/routes/agent/register.rs b/src/routes/agent/register.rs index 7427bce9..11cfcb75 100644 --- a/src/routes/agent/register.rs +++ b/src/routes/agent/register.rs @@ -43,7 +43,7 @@ fn generate_agent_token() -> String { .collect() } -#[tracing::instrument(name = "Register agent", skip(agent_pool, vault_client, req))] +#[tracing::instrument(name = "Register agent", skip_all)] #[post("/register")] pub async fn register_handler( payload: web::Json, diff --git a/src/routes/agent/report.rs b/src/routes/agent/report.rs index 8c2cf61c..05bf8411 100644 --- a/src/routes/agent/report.rs +++ b/src/routes/agent/report.rs @@ -39,10 +39,7 @@ pub struct CommandReportResponse { pub message: String, } -#[tracing::instrument( - name = "Agent report command result", - skip(agent_pool, mq_manager, _req) -)] +#[tracing::instrument(name = "Agent report command result", skip_all)] #[post("/commands/report")] pub async fn report_handler( agent: web::ReqData>, diff --git a/src/routes/agent/snapshot.rs b/src/routes/agent/snapshot.rs index e1cf74bb..e3d4c7b3 100644 --- a/src/routes/agent/snapshot.rs +++ b/src/routes/agent/snapshot.rs @@ -46,7 +46,7 @@ fn default_command_limit() -> i64 { 50 } -#[tracing::instrument(name = "Get deployment snapshot", skip(agent_pool, query))] +#[tracing::instrument(name = "Get deployment snapshot", skip_all)] #[get("/deployments/{deployment_hash}")] pub async fn snapshot_handler( path: web::Path, @@ -189,7 +189,7 @@ pub async fn snapshot_handler( /// Returns the snapshot for the most recently active agent in a project. /// Used by the CLI as a stable project-scoped alternative to deployment-hash lookup. 
-#[tracing::instrument(name = "Get project agent snapshot", skip(agent_pool))] +#[tracing::instrument(name = "Get project agent snapshot", skip_all)] #[get("/project/{project_id}")] pub async fn project_snapshot_handler( path: web::Path, diff --git a/src/routes/agent/wait.rs b/src/routes/agent/wait.rs index 92c8927c..cbd352ce 100644 --- a/src/routes/agent/wait.rs +++ b/src/routes/agent/wait.rs @@ -10,7 +10,7 @@ pub struct WaitQuery { pub interval: Option, } -#[tracing::instrument(name = "Agent poll for commands", skip(agent_pool, _req))] +#[tracing::instrument(name = "Agent poll for commands", skip_all)] #[get("/commands/wait/{deployment_hash}")] pub async fn wait_handler( agent: web::ReqData>, diff --git a/src/routes/agreement/add.rs b/src/routes/agreement/add.rs index 7f3e7fe7..e0fd98f2 100644 --- a/src/routes/agreement/add.rs +++ b/src/routes/agreement/add.rs @@ -7,7 +7,7 @@ use serde_valid::Validate; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Admin add agreement.")] +#[tracing::instrument(name = "Admin add agreement.", skip_all)] #[post("")] pub async fn admin_add_handler( form: web::Json, @@ -31,7 +31,7 @@ pub async fn admin_add_handler( }) } -#[tracing::instrument(name = "Add user agreement.")] +#[tracing::instrument(name = "Add user agreement.", skip_all)] #[post("")] pub async fn user_add_handler( user: web::ReqData>, diff --git a/src/routes/agreement/get.rs b/src/routes/agreement/get.rs index 20d469a3..1df27fe5 100644 --- a/src/routes/agreement/get.rs +++ b/src/routes/agreement/get.rs @@ -5,10 +5,10 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Get agreement by id.")] +#[tracing::instrument(name = "Get agreement by id.", skip_all)] #[get("/{id}")] pub async fn get_handler( - user: web::ReqData>, + _user: web::ReqData>, path: web::Path<(i32,)>, pg_pool: web::Data, ) -> Result { @@ -23,7 +23,7 @@ pub async fn get_handler( }) } -#[tracing::instrument(name = 
"Check if agreement signed/accepted.")] +#[tracing::instrument(name = "Check if agreement signed/accepted.", skip_all)] #[get("/accepted/{id}")] pub async fn accept_handler( user: web::ReqData>, diff --git a/src/routes/agreement/update.rs b/src/routes/agreement/update.rs index 28f2ade1..531f1acd 100644 --- a/src/routes/agreement/update.rs +++ b/src/routes/agreement/update.rs @@ -6,7 +6,7 @@ use actix_web::{put, web, Responder, Result}; use serde_valid::Validate; use sqlx::PgPool; -#[tracing::instrument(name = "Admin update agreement.")] +#[tracing::instrument(name = "Admin update agreement.", skip_all)] #[put("/{id}")] pub async fn admin_update_handler( path: web::Path<(i32,)>, diff --git a/src/routes/chat/delete.rs b/src/routes/chat/delete.rs index 2112f2f9..86bec892 100644 --- a/src/routes/chat/delete.rs +++ b/src/routes/chat/delete.rs @@ -13,7 +13,7 @@ pub struct Query { /// DELETE /chat/history?project_id={id} /// Clears the stored chat conversation for the logged-in user. -#[tracing::instrument(name = "Delete chat history.")] +#[tracing::instrument(name = "Delete chat history.", skip_all)] #[delete("/history")] pub async fn item( user: web::ReqData>, diff --git a/src/routes/chat/get.rs b/src/routes/chat/get.rs index 29b412c3..fed31ef6 100644 --- a/src/routes/chat/get.rs +++ b/src/routes/chat/get.rs @@ -14,7 +14,7 @@ pub struct Query { /// GET /chat/history?project_id={id} /// Returns the saved chat conversation for the logged-in user. /// project_id is optional; omit for canvas/onboarding mode. -#[tracing::instrument(name = "Get chat history.")] +#[tracing::instrument(name = "Get chat history.", skip_all)] #[get("/history")] pub async fn item( user: web::ReqData>, diff --git a/src/routes/chat/upsert.rs b/src/routes/chat/upsert.rs index 51c7111a..bf3dee75 100644 --- a/src/routes/chat/upsert.rs +++ b/src/routes/chat/upsert.rs @@ -15,7 +15,7 @@ pub struct ChatHistoryRequest { /// PUT /chat/history /// Upserts the chat conversation for the logged-in user. 
-#[tracing::instrument(name = "Upsert chat history.")] +#[tracing::instrument(name = "Upsert chat history.", skip_all)] #[put("/history")] pub async fn item( user: web::ReqData>, diff --git a/src/routes/client/add.rs b/src/routes/client/add.rs index bddbb74a..4fea4d24 100644 --- a/src/routes/client/add.rs +++ b/src/routes/client/add.rs @@ -7,7 +7,7 @@ use actix_web::{post, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Add client.")] +#[tracing::instrument(name = "Add client.", skip_all)] #[post("")] pub async fn add_handler( user: web::ReqData>, diff --git a/src/routes/client/disable.rs b/src/routes/client/disable.rs index 7672ea01..41b30001 100644 --- a/src/routes/client/disable.rs +++ b/src/routes/client/disable.rs @@ -6,11 +6,11 @@ use actix_web::{put, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "User disable client.")] +#[tracing::instrument(name = "User disable client.", skip_all)] #[put("/{id}/disable")] pub async fn disable_handler( user: web::ReqData>, - settings: web::Data, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { @@ -29,11 +29,11 @@ pub async fn disable_handler( disable_client(pg_pool.get_ref(), client).await } -#[tracing::instrument(name = "Admin disable client.")] +#[tracing::instrument(name = "Admin disable client.", skip_all)] #[put("/{id}/disable")] pub async fn admin_disable_handler( - user: web::ReqData>, - settings: web::Data, + _user: web::ReqData>, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { diff --git a/src/routes/client/enable.rs b/src/routes/client/enable.rs index e3955a6d..3f090f6a 100644 --- a/src/routes/client/enable.rs +++ b/src/routes/client/enable.rs @@ -7,11 +7,11 @@ use actix_web::{put, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "User enable client.")] +#[tracing::instrument(name = "User enable client.", 
skip_all)] #[put("/{id}/enable")] pub async fn enable_handler( user: web::ReqData>, - settings: web::Data, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { @@ -28,11 +28,11 @@ pub async fn enable_handler( enable_client(pg_pool.get_ref(), client).await } -#[tracing::instrument(name = "Admin enable client.")] +#[tracing::instrument(name = "Admin enable client.", skip_all)] #[put("/{id}/enable")] pub async fn admin_enable_handler( - user: web::ReqData>, - settings: web::Data, + _user: web::ReqData>, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { diff --git a/src/routes/client/update.rs b/src/routes/client/update.rs index de095814..49f5c995 100644 --- a/src/routes/client/update.rs +++ b/src/routes/client/update.rs @@ -6,11 +6,11 @@ use actix_web::{put, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "User update client.")] +#[tracing::instrument(name = "User update client.", skip_all)] #[put("/{id}")] pub async fn update_handler( user: web::ReqData>, - settings: web::Data, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { @@ -27,11 +27,11 @@ pub async fn update_handler( update_client(pg_pool.get_ref(), client).await } -#[tracing::instrument(name = "Admin update client.")] +#[tracing::instrument(name = "Admin update client.", skip_all)] #[put("/{id}")] pub async fn admin_update_handler( - user: web::ReqData>, - settings: web::Data, + _user: web::ReqData>, + _settings: web::Data, pg_pool: web::Data, path: web::Path<(i32,)>, ) -> Result { diff --git a/src/routes/cloud/add.rs b/src/routes/cloud/add.rs index f6d34c7c..97ca6203 100644 --- a/src/routes/cloud/add.rs +++ b/src/routes/cloud/add.rs @@ -8,7 +8,7 @@ use sqlx::PgPool; use std::ops::Deref; use std::sync::Arc; -#[tracing::instrument(name = "Add cloud.")] +#[tracing::instrument(name = "Add cloud.", skip_all)] #[post("")] pub async fn add( user: web::ReqData>, diff --git 
a/src/routes/cloud/delete.rs b/src/routes/cloud/delete.rs index 2347220b..96592150 100644 --- a/src/routes/cloud/delete.rs +++ b/src/routes/cloud/delete.rs @@ -6,7 +6,7 @@ use actix_web::{delete, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Delete cloud record of a user.")] +#[tracing::instrument(name = "Delete cloud record of a user.", skip_all)] #[delete("/{id}")] pub async fn item( user: web::ReqData>, @@ -27,7 +27,7 @@ pub async fn item( None => Err(JsonResponse::::build().not_found("not found")), })?; - db::cloud::delete(pg_pool.get_ref(), cloud.id) + db::cloud::delete(pg_pool.get_ref(), cloud.id, &user.id) .await .map_err(|err| JsonResponse::::build().internal_server_error(err)) .and_then(|result| match result { diff --git a/src/routes/cloud/get.rs b/src/routes/cloud/get.rs index cd7e8222..ad8af5a2 100644 --- a/src/routes/cloud/get.rs +++ b/src/routes/cloud/get.rs @@ -6,7 +6,7 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Get cloud credentials.")] +#[tracing::instrument(name = "Get cloud credentials.", skip_all)] #[get("/{id}")] pub async fn item( path: web::Path<(i32,)>, @@ -29,10 +29,10 @@ pub async fn item( }) } -#[tracing::instrument(name = "Get all clouds.")] +#[tracing::instrument(name = "Get all clouds.", skip_all)] #[get("")] pub async fn list( - path: web::Path<()>, + _path: web::Path<()>, user: web::ReqData>, pg_pool: web::Data, ) -> Result { diff --git a/src/routes/cloud/update.rs b/src/routes/cloud/update.rs index 42d4c26a..60b698ab 100644 --- a/src/routes/cloud/update.rs +++ b/src/routes/cloud/update.rs @@ -8,7 +8,7 @@ use sqlx::PgPool; use std::ops::Deref; use std::sync::Arc; -#[tracing::instrument(name = "Update cloud.")] +#[tracing::instrument(name = "Update cloud.", skip_all)] #[put("/{id}")] pub async fn item( path: web::Path<(i32,)>, @@ -52,7 +52,7 @@ pub async fn item( } } - tracing::debug!("Updating cloud {:?}", cloud); 
+ tracing::debug!("Updating cloud id={} provider={}", cloud.id, cloud.provider); db::cloud::update(pg_pool.get_ref(), cloud) .await diff --git a/src/routes/command/cancel.rs b/src/routes/command/cancel.rs index c384c422..65dbf9fa 100644 --- a/src/routes/command/cancel.rs +++ b/src/routes/command/cancel.rs @@ -5,7 +5,7 @@ use actix_web::{post, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Cancel command", skip(pg_pool, user))] +#[tracing::instrument(name = "Cancel command", skip_all)] #[post("/{deployment_hash}/{command_id}/cancel")] pub async fn cancel_handler( user: web::ReqData>, diff --git a/src/routes/command/create.rs b/src/routes/command/create.rs index eed9f984..c5b0affa 100644 --- a/src/routes/command/create.rs +++ b/src/routes/command/create.rs @@ -33,7 +33,7 @@ pub struct CreateCommandResponse { pub status: String, } -#[tracing::instrument(name = "Create command", skip(pg_pool, user, settings))] +#[tracing::instrument(name = "Create command", skip_all)] #[post("")] pub async fn create_handler( user: web::ReqData>, diff --git a/src/routes/command/get.rs b/src/routes/command/get.rs index 8a811d06..7373f1f6 100644 --- a/src/routes/command/get.rs +++ b/src/routes/command/get.rs @@ -5,7 +5,7 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Get command by ID", skip(pg_pool, user))] +#[tracing::instrument(name = "Get command by ID", skip_all)] #[get("/{deployment_hash}/{command_id}")] pub async fn get_handler( user: web::ReqData>, @@ -14,6 +14,19 @@ pub async fn get_handler( ) -> Result { let (deployment_hash, command_id) = path.into_inner(); + // Verify deployment belongs to the requesting user + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &deployment_hash) + .await + .map_err(|err| JsonResponse::internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ 
=> { + return Err(JsonResponse::not_found("Deployment not found")); + } + } + // Fetch command by its string command_id (e.g. "cmd_"), not the row UUID let command = db::command::fetch_by_command_id(pg_pool.get_ref(), &command_id) .await diff --git a/src/routes/command/list.rs b/src/routes/command/list.rs index e15b834a..3ec2936e 100644 --- a/src/routes/command/list.rs +++ b/src/routes/command/list.rs @@ -17,7 +17,7 @@ pub struct CommandListQuery { pub include_results: bool, } -#[tracing::instrument(name = "List commands for deployment", skip(pg_pool, user))] +#[tracing::instrument(name = "List commands for deployment", skip_all)] #[get("/{deployment_hash}")] pub async fn list_handler( user: web::ReqData>, @@ -28,6 +28,19 @@ pub async fn list_handler( let deployment_hash = path.into_inner(); let limit = query.limit.unwrap_or(50).max(1).min(500); + // Verify deployment belongs to the requesting user + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &deployment_hash) + .await + .map_err(|err| JsonResponse::internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::not_found("Deployment not found")); + } + } + let commands = if let Some(since_raw) = &query.since { let since = DateTime::parse_from_rfc3339(since_raw) .map_err(|_err| JsonResponse::bad_request("Invalid since timestamp"))? 
diff --git a/src/routes/deployment/capabilities.rs b/src/routes/deployment/capabilities.rs index ff08043a..7689634c 100644 --- a/src/routes/deployment/capabilities.rs +++ b/src/routes/deployment/capabilities.rs @@ -96,7 +96,7 @@ const COMMAND_CATALOG: &[CommandMetadata] = &[ }, ]; -#[tracing::instrument(name = "Get agent capabilities", skip(pg_pool))] +#[tracing::instrument(name = "Get agent capabilities", skip_all)] #[get("/{deployment_hash}/capabilities")] pub async fn capabilities_handler( path: web::Path, diff --git a/src/routes/deployment/force_complete.rs b/src/routes/deployment/force_complete.rs index 13a96c17..bdbde3c3 100644 --- a/src/routes/deployment/force_complete.rs +++ b/src/routes/deployment/force_complete.rs @@ -22,7 +22,7 @@ pub struct ForceCompleteQuery { /// Without `?force=true`: only `paused` or `error` are accepted. /// With `?force=true`: `in_progress` is also accepted. /// Only the owning user may invoke this. -#[tracing::instrument(name = "Force-complete deployment", skip(pg_pool))] +#[tracing::instrument(name = "Force-complete deployment", skip_all)] #[post("/{id}/force-complete")] pub async fn force_complete_handler( path: web::Path, diff --git a/src/routes/deployment/status.rs b/src/routes/deployment/status.rs index 142a4abc..50c7c0aa 100644 --- a/src/routes/deployment/status.rs +++ b/src/routes/deployment/status.rs @@ -49,7 +49,7 @@ impl From for DeploymentStatusResponse { /// `GET /api/v1/deployments/hash/{hash}` /// /// Fetch a deployment by its deployment hash string. -#[tracing::instrument(name = "Get deployment status by hash", skip(pg_pool))] +#[tracing::instrument(name = "Get deployment status by hash", skip_all)] #[get("/hash/{hash}")] pub async fn status_by_hash_handler( path: web::Path, @@ -82,7 +82,7 @@ pub async fn status_by_hash_handler( /// /// Fetch deployment status by deployment ID. /// Requires authentication (inherited from the `/api` scope middleware). 
-#[tracing::instrument(name = "Get deployment status by ID", skip(pg_pool))] +#[tracing::instrument(name = "Get deployment status by ID", skip_all)] #[get("/{id}")] pub async fn status_handler( path: web::Path, @@ -115,7 +115,7 @@ pub async fn status_handler( /// `GET /api/v1/deployments` /// /// List deployments for the authenticated user. -#[tracing::instrument(name = "List deployments", skip(pg_pool, user))] +#[tracing::instrument(name = "List deployments", skip_all)] #[get("")] pub async fn list_handler( user: web::ReqData>, @@ -145,7 +145,7 @@ pub async fn list_handler( /// /// Fetch the latest deployment status for a project. /// Returns the most recent (non-deleted) deployment. -#[tracing::instrument(name = "Get deployment status by project ID", skip(pg_pool))] +#[tracing::instrument(name = "Get deployment status by project ID", skip_all)] #[get("/project/{project_id}")] pub async fn status_by_project_handler( path: web::Path, diff --git a/src/routes/dockerhub/mod.rs b/src/routes/dockerhub/mod.rs index 83215ad1..b30efc84 100644 --- a/src/routes/dockerhub/mod.rs +++ b/src/routes/dockerhub/mod.rs @@ -23,11 +23,8 @@ pub struct RepositoryPath { pub repository: String, } -#[tracing::instrument( - name = "dockerhub_search_namespaces", - skip(connector), - fields(query = query.q.as_deref().unwrap_or_default()) -)] +#[tracing::instrument(name = "dockerhub_search_namespaces", + fields(query = query.q.as_deref().unwrap_or_default()), skip_all)] #[get("/namespaces")] pub async fn search_namespaces( connector: web::Data>, @@ -45,11 +42,8 @@ pub async fn search_namespaces( .map_err(Error::from) } -#[tracing::instrument( - name = "dockerhub_list_repositories", - skip(connector), - fields(namespace = %path.namespace, query = query.q.as_deref().unwrap_or_default()) -)] +#[tracing::instrument(name = "dockerhub_list_repositories", + fields(namespace = %path.namespace, query = query.q.as_deref().unwrap_or_default()), skip_all)] #[get("/{namespace}/repositories")] pub async fn 
list_repositories( connector: web::Data>, @@ -68,11 +62,8 @@ pub async fn list_repositories( .map_err(Error::from) } -#[tracing::instrument( - name = "dockerhub_list_tags", - skip(connector), - fields(namespace = %path.namespace, repository = %path.repository, query = query.q.as_deref().unwrap_or_default()) -)] +#[tracing::instrument(name = "dockerhub_list_tags", + fields(namespace = %path.namespace, repository = %path.repository, query = query.q.as_deref().unwrap_or_default()), skip_all)] #[get("/{namespace}/repositories/{repository}/tags")] pub async fn list_tags( connector: web::Data>, @@ -90,7 +81,7 @@ pub async fn list_tags( /// Receive a DockerHub autocomplete analytics event from the stack builder UI. /// The payload is `{event: string, payload: any}` — logged and discarded. /// Returns 204 No Content so the browser's fire-and-forget fetch succeeds. -#[tracing::instrument(name = "dockerhub_log_event", skip(body))] +#[tracing::instrument(name = "dockerhub_log_event", skip_all)] #[post("/events")] pub async fn log_event(body: web::Json) -> HttpResponse { tracing::debug!(event = ?body, "dockerhub autocomplete event received"); diff --git a/src/routes/marketplace/admin.rs b/src/routes/marketplace/admin.rs index 6e3a7dda..c9f9b325 100644 --- a/src/routes/marketplace/admin.rs +++ b/src/routes/marketplace/admin.rs @@ -10,7 +10,7 @@ use std::sync::Arc; use tracing::Instrument; use uuid; -#[tracing::instrument(name = "List submitted templates (admin)")] +#[tracing::instrument(name = "List submitted templates (admin)", skip_all)] #[get("")] pub async fn list_submitted_handler( _admin: web::ReqData>, // role enforced by Casbin @@ -24,7 +24,7 @@ pub async fn list_submitted_handler( .map(|templates| JsonResponse::build().set_list(templates).ok("OK")) } -#[tracing::instrument(name = "Get template detail (admin)")] +#[tracing::instrument(name = "Get template detail (admin)", skip_all)] #[get("/{id}")] pub async fn detail_handler( _admin: web::ReqData>, @@ -66,7 +66,7 @@ 
pub struct AdminDecisionRequest { pub reason: Option, } -#[tracing::instrument(name = "Approve template (admin)")] +#[tracing::instrument(name = "Approve template (admin)", skip_all)] #[post("/{id}/approve")] pub async fn approve_handler( admin: web::ReqData>, // role enforced by Casbin @@ -136,7 +136,7 @@ pub async fn approve_handler( Ok(JsonResponse::::build().ok("Approved")) } -#[tracing::instrument(name = "Reject template (admin)")] +#[tracing::instrument(name = "Reject template (admin)", skip_all)] #[post("/{id}/reject")] pub async fn reject_handler( admin: web::ReqData>, // role enforced by Casbin @@ -196,7 +196,7 @@ pub struct UnapproveRequest { pub reason: Option, } -#[tracing::instrument(name = "Unapprove template (admin)")] +#[tracing::instrument(name = "Unapprove template (admin)", skip_all)] #[post("/{id}/unapprove")] pub async fn unapprove_handler( admin: web::ReqData>, @@ -248,7 +248,7 @@ pub async fn unapprove_handler( Ok(JsonResponse::::build().ok("Template unapproved and hidden from marketplace")) } -#[tracing::instrument(name = "Security scan template (admin)")] +#[tracing::instrument(name = "Security scan template (admin)", skip_all)] #[post("/{id}/security-scan")] pub async fn security_scan_handler( admin: web::ReqData>, @@ -331,7 +331,7 @@ pub async fn security_scan_handler( .ok("Security scan completed")) } -#[tracing::instrument(name = "List available plans from User Service", skip(user_service))] +#[tracing::instrument(name = "List available plans from User Service", skip_all)] #[get("/plans")] pub async fn list_plans_handler( _admin: web::ReqData>, // role enforced by Casbin @@ -370,7 +370,7 @@ pub struct AdminPricingRequest { pub currency: Option, } -#[tracing::instrument(name = "Admin update template pricing")] +#[tracing::instrument(name = "Admin update template pricing", skip_all)] #[patch("/{id}/pricing")] pub async fn pricing_handler( _admin: web::ReqData>, @@ -415,7 +415,7 @@ pub struct AdminVerificationsRequest { pub 
hardened_images: Option, } -#[tracing::instrument(name = "Admin update template verifications")] +#[tracing::instrument(name = "Admin update template verifications", skip_all)] #[patch("/{id}/verifications")] pub async fn update_verifications_handler( _admin: web::ReqData>, diff --git a/src/routes/marketplace/agent.rs b/src/routes/marketplace/agent.rs index 42861ecb..fbc296cf 100644 --- a/src/routes/marketplace/agent.rs +++ b/src/routes/marketplace/agent.rs @@ -29,7 +29,7 @@ fn generate_token() -> String { .collect() } -#[tracing::instrument(name = "Register marketplace agent", skip(_pg_pool))] +#[tracing::instrument(name = "Register marketplace agent", skip_all)] #[post("/register")] pub async fn register_marketplace_agent_handler( _pg_pool: web::Data, diff --git a/src/routes/marketplace/categories.rs b/src/routes/marketplace/categories.rs index 22304d6c..33c6062a 100644 --- a/src/routes/marketplace/categories.rs +++ b/src/routes/marketplace/categories.rs @@ -4,7 +4,7 @@ use crate::models; use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; -#[tracing::instrument(name = "List categories")] +#[tracing::instrument(name = "List categories", skip_all)] #[get("/categories")] pub async fn list_handler(pg_pool: web::Data) -> Result { db::marketplace::get_categories(pg_pool.get_ref()) diff --git a/src/routes/marketplace/creator.rs b/src/routes/marketplace/creator.rs index 31b2b711..e14a23a1 100644 --- a/src/routes/marketplace/creator.rs +++ b/src/routes/marketplace/creator.rs @@ -26,7 +26,7 @@ pub struct CreateTemplateRequest { pub currency: Option, } -#[tracing::instrument(name = "Create draft template")] +#[tracing::instrument(name = "Create draft template", skip_all)] #[post("")] pub async fn create_handler( user: web::ReqData>, @@ -143,7 +143,7 @@ pub struct UpdateTemplateRequest { pub currency: Option, } -#[tracing::instrument(name = "Update template metadata")] +#[tracing::instrument(name = "Update template metadata", skip_all)] #[put("/{id}")] pub async 
fn update_handler( user: web::ReqData>, @@ -198,7 +198,7 @@ pub async fn update_handler( } } -#[tracing::instrument(name = "Submit template for review")] +#[tracing::instrument(name = "Submit template for review", skip_all)] #[post("/{id}/submit")] pub async fn submit_handler( user: web::ReqData>, @@ -240,7 +240,7 @@ pub struct ResubmitRequest { pub changelog: Option, } -#[tracing::instrument(name = "Resubmit template with new version")] +#[tracing::instrument(name = "Resubmit template with new version", skip_all)] #[post("/{id}/resubmit")] pub async fn resubmit_handler( user: web::ReqData>, @@ -288,7 +288,7 @@ pub async fn resubmit_handler( .ok("Resubmitted for review")) } -#[tracing::instrument(name = "List my templates")] +#[tracing::instrument(name = "List my templates", skip_all)] #[get("/mine")] pub async fn mine_handler( user: Option>>, @@ -303,7 +303,7 @@ pub async fn mine_handler( .map(|templates| JsonResponse::build().set_list(templates).ok("OK")) } -#[tracing::instrument(name = "List reviews for my template")] +#[tracing::instrument(name = "List reviews for my template", skip_all)] #[get("/{id}/reviews")] pub async fn my_reviews_handler( user: Option>>, diff --git a/src/routes/marketplace/public.rs b/src/routes/marketplace/public.rs index 0a6bdf98..257899af 100644 --- a/src/routes/marketplace/public.rs +++ b/src/routes/marketplace/public.rs @@ -3,7 +3,7 @@ use crate::helpers::JsonResponse; use actix_web::{get, web, HttpResponse, Responder, Result}; use sqlx::PgPool; -#[tracing::instrument(name = "List approved templates (public)")] +#[tracing::instrument(name = "List approved templates (public)", skip_all)] #[get("")] pub async fn list_handler( query: web::Query, @@ -21,7 +21,7 @@ pub async fn list_handler( .map(|templates| JsonResponse::build().set_list(templates).ok("OK")) } -#[tracing::instrument(name = "Generate install script")] +#[tracing::instrument(name = "Generate install script", skip_all)] #[get("/install/{purchase_token}")] pub async fn 
install_script_handler(path: web::Path) -> Result { let purchase_token = path.into_inner(); @@ -104,7 +104,7 @@ echo "" ) } -#[tracing::instrument(name = "Download stack archive")] +#[tracing::instrument(name = "Download stack archive", skip_all)] #[get("/download/{purchase_token}")] pub async fn download_stack_handler( path: web::Path, @@ -138,7 +138,7 @@ pub struct TemplateListQuery { pub sort: Option, // recent|popular|rating } -#[tracing::instrument(name = "Get template by slug (public)")] +#[tracing::instrument(name = "Get template by slug (public)", skip_all)] #[get("/{slug}")] pub async fn detail_handler( path: web::Path<(String,)>, diff --git a/src/routes/pipe/create.rs b/src/routes/pipe/create.rs index cab76f39..8d92af2c 100644 --- a/src/routes/pipe/create.rs +++ b/src/routes/pipe/create.rs @@ -41,7 +41,7 @@ pub struct CreatePipeInstanceRequest { pub config_override: Option, } -#[tracing::instrument(name = "Create pipe template", skip(pg_pool, user))] +#[tracing::instrument(name = "Create pipe template", skip_all)] #[post("/templates")] pub async fn create_template_handler( user: web::ReqData>, @@ -100,7 +100,7 @@ pub async fn create_template_handler( .created("Pipe template created successfully")) } -#[tracing::instrument(name = "Create pipe instance", skip(pg_pool, user))] +#[tracing::instrument(name = "Create pipe instance", skip_all)] #[post("/instances")] pub async fn create_instance_handler( user: web::ReqData>, @@ -118,6 +118,19 @@ pub async fn create_instance_handler( .bad_request("either target_container or target_url is required")); } + // Verify deployment belongs to the requesting user + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), req.deployment_hash.trim()) + .await + .map_err(|err| JsonResponse::<()>::build().internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::<()>::build().not_found("Deployment not found")); + } 
+ } + // Verify template exists if provided if let Some(template_id) = &req.template_id { let template = db::pipe::get_template(pg_pool.get_ref(), template_id) diff --git a/src/routes/pipe/delete.rs b/src/routes/pipe/delete.rs index 0ba15573..71efb680 100644 --- a/src/routes/pipe/delete.rs +++ b/src/routes/pipe/delete.rs @@ -11,15 +11,33 @@ struct DeleteResponse { deleted: bool, } -#[tracing::instrument(name = "Delete pipe template", skip(pg_pool, _user))] +#[tracing::instrument(name = "Delete pipe template", skip_all)] #[delete("/templates/{template_id}")] pub async fn delete_template_handler( - _user: web::ReqData>, + user: web::ReqData>, path: web::Path, pg_pool: web::Data, ) -> Result { let template_id = path.into_inner(); + // Verify the template belongs to the requesting user + let template = db::pipe::get_template(pg_pool.get_ref(), &template_id) + .await + .map_err(|err| { + tracing::error!("Failed to fetch pipe template: {}", err); + JsonResponse::<()>::build().internal_server_error(err) + })?; + + match &template { + Some(t) if t.created_by == user.id => {} + Some(_) => { + return Err(JsonResponse::not_found("Pipe template not found")); + } + None => { + return Err(JsonResponse::not_found("Pipe template not found")); + } + } + let deleted = db::pipe::delete_template(pg_pool.get_ref(), &template_id) .await .map_err(|err| { @@ -36,15 +54,42 @@ pub async fn delete_template_handler( } } -#[tracing::instrument(name = "Delete pipe instance", skip(pg_pool, _user))] +#[tracing::instrument(name = "Delete pipe instance", skip_all)] #[delete("/instances/{instance_id}")] pub async fn delete_instance_handler( - _user: web::ReqData>, + user: web::ReqData>, path: web::Path, pg_pool: web::Data, ) -> Result { let instance_id = path.into_inner(); + // Verify the instance belongs to the requesting user via deployment ownership + let instance = db::pipe::get_instance(pg_pool.get_ref(), &instance_id) + .await + .map_err(|err| { + tracing::error!("Failed to fetch pipe 
instance: {}", err); + JsonResponse::<()>::build().internal_server_error(err) + })?; + + match &instance { + Some(i) => { + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &i.deployment_hash) + .await + .map_err(|err| JsonResponse::<()>::build().internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::not_found("Pipe instance not found")); + } + } + } + None => { + return Err(JsonResponse::not_found("Pipe instance not found")); + } + } + let deleted = db::pipe::delete_instance(pg_pool.get_ref(), &instance_id) .await .map_err(|err| { diff --git a/src/routes/pipe/get.rs b/src/routes/pipe/get.rs index 53cdb6ed..6b6eb3e1 100644 --- a/src/routes/pipe/get.rs +++ b/src/routes/pipe/get.rs @@ -5,10 +5,10 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Get pipe template by ID", skip(pg_pool, _user))] +#[tracing::instrument(name = "Get pipe template by ID", skip_all)] #[get("/templates/{template_id}")] pub async fn get_template_handler( - _user: web::ReqData>, + user: web::ReqData>, path: web::Path, pg_pool: web::Data, ) -> Result { @@ -22,17 +22,23 @@ pub async fn get_template_handler( })?; match template { - Some(t) => Ok(JsonResponse::build() - .set_item(Some(t)) - .ok("Pipe template fetched successfully")), + Some(t) => { + // Only allow access to own templates or public ones + if !t.is_public.unwrap_or(false) && t.created_by != user.id { + return Err(JsonResponse::not_found("Pipe template not found")); + } + Ok(JsonResponse::build() + .set_item(Some(t)) + .ok("Pipe template fetched successfully")) + } None => Err(JsonResponse::not_found("Pipe template not found")), } } -#[tracing::instrument(name = "Get pipe instance by ID", skip(pg_pool, _user))] +#[tracing::instrument(name = "Get pipe instance by ID", skip_all)] #[get("/instances/detail/{instance_id}")] pub async fn 
get_instance_handler( - _user: web::ReqData>, + user: web::ReqData>, path: web::Path, pg_pool: web::Data, ) -> Result { @@ -46,9 +52,24 @@ pub async fn get_instance_handler( })?; match instance { - Some(i) => Ok(JsonResponse::build() - .set_item(Some(i)) - .ok("Pipe instance fetched successfully")), + Some(i) => { + // Verify the deployment belongs to the requesting user + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &i.deployment_hash) + .await + .map_err(|err| JsonResponse::internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::not_found("Pipe instance not found")); + } + } + + Ok(JsonResponse::build() + .set_item(Some(i)) + .ok("Pipe instance fetched successfully")) + } None => Err(JsonResponse::not_found("Pipe instance not found")), } } diff --git a/src/routes/pipe/list.rs b/src/routes/pipe/list.rs index 1bcc6185..e0b4f971 100644 --- a/src/routes/pipe/list.rs +++ b/src/routes/pipe/list.rs @@ -14,15 +14,17 @@ pub struct ListTemplatesQuery { pub public_only: bool, } -#[tracing::instrument(name = "List pipe templates", skip(pg_pool, _user))] +#[tracing::instrument(name = "List pipe templates", skip_all)] #[get("/templates")] pub async fn list_templates_handler( - _user: web::ReqData>, + user: web::ReqData>, query: web::Query, pg_pool: web::Data, ) -> Result { - let templates = db::pipe::list_templates( + // Show user's own templates + public templates (never other users' private templates) + let templates = db::pipe::list_templates_for_user( pg_pool.get_ref(), + &user.id, query.source_app_type.as_deref(), query.target_app_type.as_deref(), query.public_only, @@ -38,15 +40,28 @@ pub async fn list_templates_handler( .ok("Pipe templates fetched successfully")) } -#[tracing::instrument(name = "List pipe instances for deployment", skip(pg_pool, _user))] +#[tracing::instrument(name = "List pipe instances for deployment", skip_all)] 
#[get("/instances/{deployment_hash}")] pub async fn list_instances_handler( - _user: web::ReqData>, + user: web::ReqData>, path: web::Path, pg_pool: web::Data, ) -> Result { let deployment_hash = path.into_inner(); + // Verify deployment belongs to the requesting user + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &deployment_hash) + .await + .map_err(|err| JsonResponse::internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::not_found("Deployment not found")); + } + } + let instances = db::pipe::list_instances(pg_pool.get_ref(), &deployment_hash) .await .map_err(|err| { diff --git a/src/routes/pipe/mod.rs b/src/routes/pipe/mod.rs index 60e678d7..971a9419 100644 --- a/src/routes/pipe/mod.rs +++ b/src/routes/pipe/mod.rs @@ -2,8 +2,10 @@ mod create; mod delete; mod get; mod list; +mod update; pub use create::*; pub use delete::*; pub use get::*; pub use list::*; +pub use update::*; diff --git a/src/routes/pipe/update.rs b/src/routes/pipe/update.rs new file mode 100644 index 00000000..baff173f --- /dev/null +++ b/src/routes/pipe/update.rs @@ -0,0 +1,68 @@ +use crate::db; +use crate::helpers::JsonResponse; +use crate::models::User; +use actix_web::{put, web, Responder, Result}; +use serde::Deserialize; +use sqlx::PgPool; +use std::sync::Arc; + +#[derive(Debug, Deserialize)] +pub struct UpdatePipeStatusRequest { + pub status: String, +} + +const VALID_STATUSES: &[&str] = &["draft", "active", "paused", "error"]; + +#[tracing::instrument(name = "Update pipe instance status", skip_all)] +#[put("/instances/{instance_id}/status")] +pub async fn update_instance_status_handler( + user: web::ReqData>, + path: web::Path, + body: web::Json, + pg_pool: web::Data, +) -> Result { + let instance_id = path.into_inner(); + + if !VALID_STATUSES.contains(&body.status.as_str()) { + return Err(JsonResponse::<()>::build().bad_request( + "Invalid status. 
Must be one of: draft, active, paused, error", + )); + } + + let instance = db::pipe::get_instance(pg_pool.get_ref(), &instance_id) + .await + .map_err(|err| { + tracing::error!("Failed to fetch pipe instance: {}", err); + JsonResponse::<()>::build().internal_server_error(err) + })?; + + match &instance { + Some(i) => { + let deployment = + db::deployment::fetch_by_deployment_hash(pg_pool.get_ref(), &i.deployment_hash) + .await + .map_err(|err| JsonResponse::<()>::build().internal_server_error(err))?; + + match &deployment { + Some(d) if d.user_id.as_deref() == Some(&user.id) => {} + _ => { + return Err(JsonResponse::not_found("Pipe instance not found")); + } + } + } + None => { + return Err(JsonResponse::not_found("Pipe instance not found")); + } + } + + let updated = db::pipe::update_instance_status(pg_pool.get_ref(), &instance_id, &body.status) + .await + .map_err(|err| { + tracing::error!("Failed to update pipe instance status: {}", err); + JsonResponse::<()>::build().internal_server_error(err) + })?; + + Ok(JsonResponse::build() + .set_item(Some(updated)) + .ok("Pipe instance status updated successfully")) +} diff --git a/src/routes/project/add.rs b/src/routes/project/add.rs index b7f94a12..fd2da58d 100644 --- a/src/routes/project/add.rs +++ b/src/routes/project/add.rs @@ -8,7 +8,7 @@ use serde_valid::Validate; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Add project.")] +#[tracing::instrument(name = "Add project.", skip_all)] #[post("")] pub async fn item( web::Json(request_json): web::Json, diff --git a/src/routes/project/app.rs b/src/routes/project/app.rs index 4207995a..76be375d 100644 --- a/src/routes/project/app.rs +++ b/src/routes/project/app.rs @@ -134,7 +134,7 @@ pub struct CreateAppRequest { } /// List all apps in a project -#[tracing::instrument(name = "List project apps", skip(pg_pool))] +#[tracing::instrument(name = "List project apps", skip_all)] #[get("/{project_id}/apps")] pub async fn list_apps( user: web::ReqData>, @@ 
-165,7 +165,7 @@ pub async fn list_apps( } /// Create or update an app in a project -#[tracing::instrument(name = "Create project app", skip(pg_pool))] +#[tracing::instrument(name = "Create project app", skip_all)] #[post("/{project_id}/apps")] pub async fn create_app( user: web::ReqData>, @@ -247,7 +247,7 @@ pub async fn create_app( } /// Get a specific app by code -#[tracing::instrument(name = "Get project app", skip(pg_pool))] +#[tracing::instrument(name = "Get project app", skip_all)] #[get("/{project_id}/apps/{code}")] pub async fn get_app( user: web::ReqData>, @@ -278,7 +278,7 @@ pub async fn get_app( } /// Get app configuration (env vars, ports, domain, etc.) -#[tracing::instrument(name = "Get app config", skip(pg_pool))] +#[tracing::instrument(name = "Get app config", skip_all)] #[get("/{project_id}/apps/{code}/config")] pub async fn get_app_config( user: web::ReqData>, @@ -325,7 +325,7 @@ pub async fn get_app_config( } /// Get environment variables for an app -#[tracing::instrument(name = "Get app env vars", skip(pg_pool))] +#[tracing::instrument(name = "Get app env vars", skip_all)] #[get("/{project_id}/apps/{code}/env")] pub async fn get_env_vars( user: web::ReqData>, @@ -365,7 +365,7 @@ pub async fn get_env_vars( } /// Update environment variables for an app -#[tracing::instrument(name = "Update app env vars", skip(pg_pool, body))] +#[tracing::instrument(name = "Update app env vars", skip_all)] #[put("/{project_id}/apps/{code}/env")] pub async fn update_env_vars( user: web::ReqData>, @@ -422,7 +422,7 @@ pub async fn update_env_vars( } /// Delete a specific environment variable -#[tracing::instrument(name = "Delete app env var", skip(pg_pool))] +#[tracing::instrument(name = "Delete app env var", skip_all)] #[delete("/{project_id}/apps/{code}/env/{name}")] pub async fn delete_env_var( user: web::ReqData>, @@ -482,7 +482,7 @@ pub async fn delete_env_var( } /// Update port mappings for an app -#[tracing::instrument(name = "Update app ports", skip(pg_pool, 
body))] +#[tracing::instrument(name = "Update app ports", skip_all)] #[put("/{project_id}/apps/{code}/ports")] pub async fn update_ports( user: web::ReqData>, @@ -535,7 +535,7 @@ pub async fn update_ports( } /// Update domain and SSL settings for an app -#[tracing::instrument(name = "Update app domain", skip(pg_pool, body))] +#[tracing::instrument(name = "Update app domain", skip_all)] #[put("/{project_id}/apps/{code}/domain")] pub async fn update_domain( user: web::ReqData>, diff --git a/src/routes/project/compose.rs b/src/routes/project/compose.rs index a36f8ff1..80cf6367 100644 --- a/src/routes/project/compose.rs +++ b/src/routes/project/compose.rs @@ -6,7 +6,7 @@ use actix_web::{get, web, web::Data, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "User's generate docker-compose.")] +#[tracing::instrument(name = "User's generate docker-compose.", skip_all)] #[get("/{id}/compose")] pub async fn add( user: web::ReqData>, @@ -31,10 +31,10 @@ pub async fn add( .map(|fc| JsonResponse::build().set_id(id).set_item(fc).ok("Success")) } -#[tracing::instrument(name = "Generate docker-compose. Admin")] +#[tracing::instrument(name = "Generate docker-compose. 
Admin", skip_all)] #[get("/{id}/compose")] pub async fn admin( - user: web::ReqData>, + _user: web::ReqData>, path: web::Path<(i32,)>, pg_pool: Data, ) -> Result { diff --git a/src/routes/project/delete.rs b/src/routes/project/delete.rs index e45e8ee1..55793b8c 100644 --- a/src/routes/project/delete.rs +++ b/src/routes/project/delete.rs @@ -6,7 +6,7 @@ use actix_web::{delete, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Delete project of a user.")] +#[tracing::instrument(name = "Delete project of a user.", skip_all)] #[delete("/{id}")] pub async fn item( user: web::ReqData>, @@ -27,7 +27,7 @@ pub async fn item( None => Err(JsonResponse::::build().not_found("")), })?; - db::project::delete(pg_pool.get_ref(), project.id) + db::project::delete(pg_pool.get_ref(), project.id, &user.id) .await .map_err(|err| JsonResponse::::build().internal_server_error(err)) .and_then(|result| match result { diff --git a/src/routes/project/deploy.rs b/src/routes/project/deploy.rs index f8b9acbc..9fadc954 100644 --- a/src/routes/project/deploy.rs +++ b/src/routes/project/deploy.rs @@ -13,7 +13,7 @@ use sqlx::PgPool; use std::sync::Arc; use uuid::Uuid; -#[tracing::instrument(name = "Deploy for every user", skip(user_service, install_service, vault_client))] +#[tracing::instrument(name = "Deploy for every user", skip_all)] #[post("/{id}/deploy")] pub async fn item( user: web::ReqData>, @@ -21,13 +21,13 @@ pub async fn item( mut form: web::Json, pg_pool: Data, mq_manager: Data, - sets: Data, + _sets: Data, user_service: Data>, install_service: Data>, vault_client: Data, ) -> Result { let id = path.0; - tracing::debug!("User {:?} is deploying project: {}", user, id); + tracing::debug!("User {} is deploying project: {}", user.id, id); if !form.validate().is_ok() { let errors = form.validate().unwrap_err().to_string(); @@ -363,7 +363,7 @@ pub async fn item( }) .map_err(|err| JsonResponse::::build().internal_server_error(err)) } 
-#[tracing::instrument(name = "Deploy, when cloud token is saved", skip(user_service, install_service, vault_client))] +#[tracing::instrument(name = "Deploy, when cloud token is saved", skip_all)] #[post("/{id}/deploy/{cloud_id}")] pub async fn saved_item( user: web::ReqData>, @@ -371,7 +371,7 @@ pub async fn saved_item( path: web::Path<(i32, i32)>, pg_pool: Data, mq_manager: Data, - sets: Data, + _sets: Data, user_service: Data>, install_service: Data>, vault_client: Data, @@ -380,8 +380,8 @@ pub async fn saved_item( let cloud_id = path.1; tracing::debug!( - "User {:?} is deploying project: {} to cloud: {} ", - user, + "User {} is deploying project: {} to cloud: {}", + user.id, id, cloud_id ); diff --git a/src/routes/project/discover.rs b/src/routes/project/discover.rs index 9dbc3ef8..084b720c 100644 --- a/src/routes/project/discover.rs +++ b/src/routes/project/discover.rs @@ -80,7 +80,7 @@ pub struct ContainerImport { /// - Registered apps with running containers (synced) /// - Running containers not in database (unregistered, can be imported) /// - Database apps with no running container (stopped or name mismatch) -#[tracing::instrument(name = "Discover containers", skip(pg_pool))] +#[tracing::instrument(name = "Discover containers", skip_all)] #[get("/{project_id}/containers/discover")] pub async fn discover_containers( user: web::ReqData>, @@ -330,7 +330,7 @@ pub async fn discover_containers( } /// Import unregistered containers into project_app -#[tracing::instrument(name = "Import containers", skip(pg_pool, body))] +#[tracing::instrument(name = "Import containers", skip_all)] #[post("/{project_id}/containers/import")] pub async fn import_containers( user: web::ReqData>, diff --git a/src/routes/project/get.rs b/src/routes/project/get.rs index 6e9049c4..c56e8571 100644 --- a/src/routes/project/get.rs +++ b/src/routes/project/get.rs @@ -5,7 +5,7 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = 
"Get logged user project.")] +#[tracing::instrument(name = "Get logged user project.", skip_all)] #[get("/{id}")] pub async fn item( user: web::ReqData>, @@ -27,7 +27,7 @@ pub async fn item( }) } -#[tracing::instrument(name = "Get project list.")] +#[tracing::instrument(name = "Get project list.", skip_all)] #[get("")] pub async fn list( user: web::ReqData>, @@ -40,10 +40,10 @@ pub async fn list( } //admin's endpoint -#[tracing::instrument(name = "Get user's project list.")] +#[tracing::instrument(name = "Get user's project list.", skip_all)] #[get("/user/{id}")] pub async fn admin_list( - user: web::ReqData>, + _user: web::ReqData>, path: web::Path<(String,)>, pg_pool: web::Data, ) -> Result { diff --git a/src/routes/project/update.rs b/src/routes/project/update.rs index f02b9f0c..4c815c62 100644 --- a/src/routes/project/update.rs +++ b/src/routes/project/update.rs @@ -8,7 +8,7 @@ use serde_valid::Validate; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Update project.")] +#[tracing::instrument(name = "Update project.", skip_all)] #[put("/{id}")] pub async fn item( path: web::Path<(i32,)>, diff --git a/src/routes/rating/add.rs b/src/routes/rating/add.rs index fa01baf3..69e91b67 100644 --- a/src/routes/rating/add.rs +++ b/src/routes/rating/add.rs @@ -8,7 +8,7 @@ use serde_valid::Validate; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "Add rating.")] +#[tracing::instrument(name = "Add rating.", skip_all)] #[post("")] pub async fn user_add_handler( user: web::ReqData>, diff --git a/src/routes/rating/delete.rs b/src/routes/rating/delete.rs index ae6dfe4b..8bce6bb5 100644 --- a/src/routes/rating/delete.rs +++ b/src/routes/rating/delete.rs @@ -6,7 +6,7 @@ use actix_web::{delete, web, Responder, Result}; use sqlx::PgPool; use std::sync::Arc; -#[tracing::instrument(name = "User delete rating.")] +#[tracing::instrument(name = "User delete rating.", skip_all)] #[delete("/{id}")] pub async fn user_delete_handler( user: 
web::ReqData>, @@ -33,10 +33,10 @@ pub async fn user_delete_handler( }) } -#[tracing::instrument(name = "Admin delete rating.")] +#[tracing::instrument(name = "Admin delete rating.", skip_all)] #[delete("/{id}")] pub async fn admin_delete_handler( - user: web::ReqData>, + _user: web::ReqData>, path: web::Path<(i32,)>, pg_pool: web::Data, ) -> Result { diff --git a/src/routes/rating/edit.rs b/src/routes/rating/edit.rs index 6d898f57..0646dd28 100644 --- a/src/routes/rating/edit.rs +++ b/src/routes/rating/edit.rs @@ -13,7 +13,7 @@ use std::sync::Arc; // ACL - access to func for a user // ACL - access to objects for a user -#[tracing::instrument(name = "User edit rating.")] +#[tracing::instrument(name = "User edit rating.", skip_all)] #[put("/{id}")] pub async fn user_edit_handler( path: web::Path<(i32,)>, @@ -49,7 +49,7 @@ pub async fn user_edit_handler( }) } -#[tracing::instrument(name = "Admin edit rating.")] +#[tracing::instrument(name = "Admin edit rating.", skip_all)] #[put("/{id}")] pub async fn admin_edit_handler( path: web::Path<(i32,)>, diff --git a/src/routes/rating/get.rs b/src/routes/rating/get.rs index 9cfdd9cf..ce51c755 100644 --- a/src/routes/rating/get.rs +++ b/src/routes/rating/get.rs @@ -5,7 +5,7 @@ use actix_web::{get, web, Responder, Result}; use sqlx::PgPool; use std::convert::Into; -#[tracing::instrument(name = "Anonymouse get rating.")] +#[tracing::instrument(name = "Anonymouse get rating.", skip_all)] #[get("/{id}")] pub async fn anonymous_get_handler( path: web::Path<(i32,)>, @@ -25,10 +25,10 @@ pub async fn anonymous_get_handler( .ok("OK")) } -#[tracing::instrument(name = "Anonymous get all ratings.")] +#[tracing::instrument(name = "Anonymous get all ratings.", skip_all)] #[get("")] pub async fn anonymous_list_handler( - path: web::Path<()>, + _path: web::Path<()>, pg_pool: web::Data, ) -> Result { db::rating::fetch_all_visible(pg_pool.get_ref()) @@ -44,7 +44,7 @@ pub async fn anonymous_list_handler( .map_err(|_err| 
JsonResponse::::build().internal_server_error("")) } -#[tracing::instrument(name = "Admin get rating.")] +#[tracing::instrument(name = "Admin get rating.", skip_all)] #[get("/{id}")] pub async fn admin_get_handler( path: web::Path<(i32,)>, @@ -64,10 +64,10 @@ pub async fn admin_get_handler( .ok("OK")) } -#[tracing::instrument(name = "Admin get the list of ratings.")] +#[tracing::instrument(name = "Admin get the list of ratings.", skip_all)] #[get("")] pub async fn admin_list_handler( - path: web::Path<()>, + _path: web::Path<()>, pg_pool: web::Data, ) -> Result { db::rating::fetch_all(pg_pool.get_ref()) diff --git a/src/routes/server/add.rs b/src/routes/server/add.rs index 5a8970c3..802a99d1 100644 --- a/src/routes/server/add.rs +++ b/src/routes/server/add.rs @@ -13,7 +13,7 @@ // ACL - access to func for a user // ACL - access to objects for a user -// #[tracing::instrument(name = "Add server.")] +// #[tracing::instrument(name = "Add server.", skip_all)] // #[post("")] // pub async fn add( // user: web::ReqData>, diff --git a/src/routes/server/delete.rs b/src/routes/server/delete.rs index ebc9d87e..e5b9fec2 100644 --- a/src/routes/server/delete.rs +++ b/src/routes/server/delete.rs @@ -8,7 +8,7 @@ use std::sync::Arc; /// Preview what would be deleted if the server is removed. /// Returns: ssh_key_shared, affected_deployments, agent_count -#[tracing::instrument(name = "Preview server deletion impact.")] +#[tracing::instrument(name = "Preview server deletion impact.", skip_all)] #[get("/{id}/delete-preview")] pub async fn delete_preview( user: web::ReqData>, @@ -70,7 +70,7 @@ pub async fn delete_preview( })).ok("Delete preview")) } -#[tracing::instrument(name = "Delete user's server with cleanup.")] +#[tracing::instrument(name = "Delete user's server with cleanup.", skip_all)] #[delete("/{id}")] pub async fn item( user: web::ReqData>, @@ -152,7 +152,7 @@ pub async fn item( } // 4. 
Delete server record from DB - db::server::delete(pg_pool.get_ref(), server.id) + db::server::delete(pg_pool.get_ref(), server.id, &user.id) .await .map_err(|err| JsonResponse::::build().internal_server_error(err)) .and_then(|result| match result { diff --git a/src/routes/server/get.rs b/src/routes/server/get.rs index 9d3ef9dd..07951acf 100644 --- a/src/routes/server/get.rs +++ b/src/routes/server/get.rs @@ -11,7 +11,7 @@ use std::sync::Arc; // ACL - access to func for a user // ACL - access to objects for a user -#[tracing::instrument(name = "Get server.")] +#[tracing::instrument(name = "Get server.", skip_all)] #[get("/{id}")] pub async fn item( path: web::Path<(i32,)>, @@ -31,10 +31,10 @@ pub async fn item( }) } -#[tracing::instrument(name = "Get all servers.")] +#[tracing::instrument(name = "Get all servers.", skip_all)] #[get("")] pub async fn list( - path: web::Path<()>, + _path: web::Path<()>, user: web::ReqData>, pg_pool: web::Data, ) -> Result { @@ -44,7 +44,7 @@ pub async fn list( .map_err(|_err| JsonResponse::::build().internal_server_error("")) } -#[tracing::instrument(name = "Get servers by project.")] +#[tracing::instrument(name = "Get servers by project.", skip_all)] #[get("/project/{project_id}")] pub async fn list_by_project( path: web::Path<(i32,)>, diff --git a/src/routes/server/ssh_key.rs b/src/routes/server/ssh_key.rs index fb1fca0a..618abf0b 100644 --- a/src/routes/server/ssh_key.rs +++ b/src/routes/server/ssh_key.rs @@ -58,7 +58,7 @@ async fn verify_server_ownership( /// Generate a new SSH key pair for a server /// POST /server/{id}/ssh-key/generate -#[tracing::instrument(name = "Generate SSH key for server.")] +#[tracing::instrument(name = "Generate SSH key for server.", skip_all)] #[post("/{id}/ssh-key/generate")] pub async fn generate_key( path: web::Path<(i32,)>, @@ -131,7 +131,7 @@ pub async fn generate_key( /// Upload an existing SSH key pair for a server /// POST /server/{id}/ssh-key/upload -#[tracing::instrument(name = "Upload SSH key 
for server.", skip(form))] +#[tracing::instrument(name = "Upload SSH key for server.", skip_all)] #[post("/{id}/ssh-key/upload")] pub async fn upload_key( path: web::Path<(i32,)>, @@ -195,7 +195,7 @@ pub async fn upload_key( /// Get the public key for a server (for copying to authorized_keys) /// GET /server/{id}/ssh-key/public -#[tracing::instrument(name = "Get public SSH key for server.")] +#[tracing::instrument(name = "Get public SSH key for server.", skip_all)] #[get("/{id}/ssh-key/public")] pub async fn get_public_key( path: web::Path<(i32,)>, @@ -293,7 +293,7 @@ pub struct ValidateResponse { /// 3. Connects to the server via SSH and authenticates /// 4. Runs system diagnostic commands (whoami, df, docker, os-release, free) /// 5. Returns comprehensive system information -#[tracing::instrument(name = "Validate SSH key for server.")] +#[tracing::instrument(name = "Validate SSH key for server.", skip_all)] #[post("/{id}/ssh-key/validate")] pub async fn validate_key( path: web::Path<(i32,)>, @@ -436,7 +436,7 @@ pub async fn validate_key( /// Delete SSH key for a server (disconnect) /// DELETE /server/{id}/ssh-key -#[tracing::instrument(name = "Delete SSH key for server.")] +#[tracing::instrument(name = "Delete SSH key for server.", skip_all)] #[delete("/{id}/ssh-key")] pub async fn delete_key( path: web::Path<(i32,)>, diff --git a/src/routes/server/update.rs b/src/routes/server/update.rs index a5ed65dc..61b1f8d7 100644 --- a/src/routes/server/update.rs +++ b/src/routes/server/update.rs @@ -8,7 +8,7 @@ use sqlx::PgPool; use std::ops::Deref; use std::sync::Arc; -#[tracing::instrument(name = "Update server.")] +#[tracing::instrument(name = "Update server.", skip_all)] #[put("/{id}")] pub async fn item( path: web::Path<(i32,)>, diff --git a/src/routes/test/deploy.rs b/src/routes/test/deploy.rs index 89fd0007..3537cb73 100644 --- a/src/routes/test/deploy.rs +++ b/src/routes/test/deploy.rs @@ -11,7 +11,7 @@ struct DeployResponse { client: Arc, } 
-#[tracing::instrument(name = "Test deploy.")] +#[tracing::instrument(name = "Test deploy.", skip_all)] #[post("/deploy")] pub async fn handler(client: web::ReqData>) -> Result { Ok(JsonResponse::build() diff --git a/src/startup.rs b/src/startup.rs index 9704ae75..aec64b96 100644 --- a/src/startup.rs +++ b/src/startup.rs @@ -240,7 +240,8 @@ pub async fn run( .service(routes::pipe::get_template_handler) .service(routes::pipe::get_instance_handler) .service(routes::pipe::delete_template_handler) - .service(routes::pipe::delete_instance_handler), + .service(routes::pipe::delete_instance_handler) + .service(routes::pipe::update_instance_status_handler), ) .service( web::scope("/admin") diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 28774511..faa9858c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -67,6 +67,178 @@ pub async fn spawn_app() -> Option { spawn_app_with_configuration(configuration).await } +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Multi-user test infrastructure for IDOR security tests +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// User A (default) identifiers — the "owner" in IDOR tests +pub const USER_A_ID: &str = "test_user_id"; +pub const USER_A_EMAIL: &str = "test@example.com"; +pub const USER_A_TOKEN: &str = "user-a-token"; + +/// User B identifiers — the "attacker" in IDOR tests +pub const USER_B_ID: &str = "other_user_id"; +pub const USER_B_EMAIL: &str = "other@example.com"; +pub const USER_B_TOKEN: &str = "user-b-token"; + +pub struct TwoUserTestApp { + pub address: String, + pub db_pool: PgPool, +} + +/// Spawn an app with a token-aware mock auth server. 
+/// - Bearer token containing "user-b" → returns User B (other_user_id) +/// - Any other Bearer token → returns User A (test_user_id) +pub async fn spawn_app_two_users() -> Option { + let mut configuration = get_configuration().expect("Failed to get configuration"); + + let auth_listener = std::net::TcpListener::bind("127.0.0.1:0") + .expect("Failed to bind port for testing auth server"); + + configuration.auth_url = format!( + "http://127.0.0.1:{}/me", + auth_listener.local_addr().unwrap().port() + ); + + let _ = tokio::spawn(mock_auth_server_two_users(auth_listener)); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + configuration.database.database_name = uuid::Uuid::new_v4().to_string(); + + let connection_pool = match configure_database(&configuration.database).await { + Ok(pool) => pool, + Err(err) => { + eprintln!("Skipping tests: failed to connect to postgres: {}", err); + return None; + } + }; + + let app_listener = + std::net::TcpListener::bind("127.0.0.1:0").expect("Failed to bind app port"); + let port = app_listener.local_addr().unwrap().port(); + let address = format!("http://127.0.0.1:{}", port); + + let agent_pool = AgentPgPool::new(connection_pool.clone()); + let server = stacker::startup::run( + app_listener, + connection_pool.clone(), + agent_pool, + configuration, + ) + .await + .expect("Failed to bind address."); + let _ = tokio::spawn(server); + + Some(TwoUserTestApp { + address, + db_pool: connection_pool, + }) +} + +/// Token-aware mock auth: inspects the Authorization header to return different users. 
+#[get("")] +async fn mock_auth_two_users(req: actix_web::HttpRequest) -> actix_web::Result { + let auth_header = req + .headers() + .get("Authorization") + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + + let is_user_b = auth_header.contains("user-b"); + + let mut user = forms::user::User::default(); + if is_user_b { + user.id = USER_B_ID.to_string(); + user.email = USER_B_EMAIL.to_string(); + } else { + user.id = USER_A_ID.to_string(); + user.email = USER_A_EMAIL.to_string(); + } + user.role = "group_user".to_string(); + user.email_confirmed = true; + + Ok(web::Json(forms::user::UserForm { user })) +} + +async fn mock_auth_server_two_users(listener: TcpListener) { + HttpServer::new(|| { + App::new().service(web::scope("/me").service(mock_auth_two_users)) + }) + .listen(listener) + .unwrap() + .run() + .await + .unwrap(); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Test data helpers +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Insert a minimal cloud credential into the DB and return its id. +pub async fn create_test_cloud(pool: &PgPool, user_id: &str, name: &str, provider: &str) -> i32 { + sqlx::query( + r#"INSERT INTO cloud (user_id, name, provider, cloud_token, save_token, created_at, updated_at) + VALUES ($1, $2, $3, 'test-token-encrypted', true, NOW(), NOW()) + RETURNING id"#, + ) + .bind(user_id) + .bind(name) + .bind(provider) + .fetch_one(pool) + .await + .map(|row| { + use sqlx::Row; + row.get::("id") + }) + .expect("Failed to insert test cloud") +} + +/// Insert a minimal deployment into the DB and return its id. 
+pub async fn create_test_deployment( + pool: &PgPool, + user_id: &str, + project_id: i32, + deployment_hash: &str, +) -> i32 { + sqlx::query( + r#"INSERT INTO deployment (project_id, deployment_hash, user_id, status, runtime, created_at, updated_at) + VALUES ($1, $2, $3, 'running', 'runc', NOW(), NOW()) + RETURNING id"#, + ) + .bind(project_id) + .bind(deployment_hash) + .bind(user_id) + .fetch_one(pool) + .await + .map(|row| { + use sqlx::Row; + row.get::("id") + }) + .expect("Failed to insert test deployment") +} + +/// Insert a test client record and return its id. +pub async fn create_test_client(pool: &PgPool, user_id: &str) -> i32 { + sqlx::query( + r#"INSERT INTO client (user_id, secret, enabled, created_at, updated_at) + VALUES ($1, 'test-client-secret', true, NOW(), NOW()) + RETURNING id"#, + ) + .bind(user_id) + .fetch_one(pool) + .await + .map(|row| { + use sqlx::Row; + row.get::("id") + }) + .expect("Failed to insert test client") +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Original infrastructure +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + pub async fn configure_database(config: &DatabaseSettings) -> Result { let mut connection = PgConnection::connect(&config.connection_string_without_db()).await?; diff --git a/tests/security_admin.rs b/tests/security_admin.rs new file mode 100644 index 00000000..5d714eac --- /dev/null +++ b/tests/security_admin.rs @@ -0,0 +1,116 @@ +mod common; + +use common::{USER_A_TOKEN, USER_B_TOKEN}; + +/// Admin endpoints (/admin/*) are protected by Casbin RBAC. +/// Mock users have role "group_user" which has no admin policies. +/// Requests should be denied with 403 Forbidden. 
+ +#[tokio::test] +async fn test_admin_list_users_rejects_non_admin() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // group_user → Casbin denies /admin/* → 403 + let resp = client + .get(format!("{}/admin/rating", &app.address)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 403, + "Regular user GET /admin/rating should return 403" + ); +} + +#[tokio::test] +async fn test_admin_routes_reject_unauthenticated() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let admin_endpoints = vec![ + ("GET", format!("{}/admin/rating", &app.address)), + ("GET", format!("{}/admin/rating/1", &app.address)), + ( + "PUT", + format!("{}/admin/client/1/enable", &app.address), + ), + ]; + + for (method, url) in admin_endpoints { + let req = match method { + "GET" => client.get(&url), + "PUT" => client.put(&url), + _ => unreachable!(), + }; + + // No Authorization header → anonymous → Casbin denies → 403 + let resp = req.send().await.expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 403, + "Unauthenticated {} {} should return 403", + method, + url + ); + } +} + +#[tokio::test] +async fn test_admin_endpoint_not_accessible_to_regular_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let admin_endpoints = vec![ + ("GET", format!("{}/admin/rating", &app.address)), + ("GET", format!("{}/admin/rating/999", &app.address)), + ( + "PUT", + format!("{}/admin/client/999", &app.address), + ), + ( + "PUT", + format!("{}/admin/client/999/enable", &app.address), + ), + ( + "PUT", + format!("{}/admin/client/999/disable", &app.address), + ), + ]; + + for (method, url) in admin_endpoints { + for token in [USER_A_TOKEN, USER_B_TOKEN] { + let 
req = match method { + "GET" => client.get(&url), + "PUT" => client.put(&url), + "DELETE" => client.delete(&url), + _ => unreachable!(), + }; + + let resp = req + .header("Authorization", format!("Bearer {}", token)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 403, + "Regular user {} {} should return 403 (token={})", + method, + url, + token + ); + } + } +} diff --git a/tests/security_agent.rs b/tests/security_agent.rs new file mode 100644 index 00000000..9abe0f0f --- /dev/null +++ b/tests/security_agent.rs @@ -0,0 +1,256 @@ +//! Security tests for agent-related endpoints. +//! +//! Validates that users can only enqueue commands and access data +//! for deployments they own. + +mod common; + +use common::{ + create_test_deployment, create_test_project, spawn_app_two_users, USER_A_ID, USER_A_TOKEN, + USER_B_TOKEN, +}; + +/// Helper: insert a command directly into the DB for testing. +async fn insert_test_command( + pool: &sqlx::PgPool, + deployment_hash: &str, + created_by: &str, +) -> String { + let cmd_id = format!("cmd_{}", uuid::Uuid::new_v4()); + sqlx::query( + "INSERT INTO commands (command_id, deployment_hash, type, status, parameters, created_by, created_at) + VALUES ($1, $2, 'status', 'queued', '{}'::jsonb, $3, NOW())", + ) + .bind(&cmd_id) + .bind(deployment_hash) + .bind(created_by) + .execute(pool) + .await + .expect("Failed to insert test command"); + cmd_id +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Enqueue — User B should NOT enqueue on User A's deployment +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_enqueue_command_rejects_other_user() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // User A owns a deployment + let project_id = create_test_project(&app.db_pool, USER_A_ID).await; + let _dep_id = create_test_deployment(&app.db_pool, USER_A_ID, project_id, 
"dep-a-001").await; + + // User B tries to enqueue a command on User A's deployment + let resp = client + .post(format!( + "{}/api/v1/agent/commands/enqueue", + &app.address + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .json(&serde_json::json!({ + "deployment_hash": "dep-a-001", + "command_type": "status", + })) + .send() + .await + .expect("Failed to send request"); + + // Should be 403 or 404, not 201 + assert!( + resp.status() == 403 || resp.status() == 404, + "User B should NOT enqueue on User A's deployment. Got: {}", + resp.status() + ); +} + +#[tokio::test] +async fn test_owner_can_enqueue_on_own_deployment() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let project_id = create_test_project(&app.db_pool, USER_A_ID).await; + let _dep_id = create_test_deployment(&app.db_pool, USER_A_ID, project_id, "dep-own-001").await; + + let resp = client + .post(format!( + "{}/api/v1/agent/commands/enqueue", + &app.address + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .json(&serde_json::json!({ + "deployment_hash": "dep-own-001", + "command_type": "status", + })) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status(), + 201, + "Owner should be able to enqueue. 
Got: {}", + resp.status() + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Commands list — User B should NOT list User A's commands +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_list_commands_rejects_other_user() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let project_id = create_test_project(&app.db_pool, USER_A_ID).await; + let _dep_id = create_test_deployment(&app.db_pool, USER_A_ID, project_id, "dep-cmd-a").await; + let _cmd_id = insert_test_command(&app.db_pool, "dep-cmd-a", USER_A_ID).await; + + // User B tries to list User A's commands + let resp = client + .get(format!( + "{}/api/v1/commands/dep-cmd-a", + &app.address + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + // Should be 404 or empty, not return User A's commands + if resp.status().is_success() { + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("Expected list field"); + assert!( + list.is_empty(), + "User B should NOT see User A's commands. 
Got {} commands", + list.len() + ); + } +} + +#[tokio::test] +async fn test_get_command_detail_rejects_other_user() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let project_id = create_test_project(&app.db_pool, USER_A_ID).await; + let _dep_id = create_test_deployment(&app.db_pool, USER_A_ID, project_id, "dep-cmd-b").await; + let cmd_id = insert_test_command(&app.db_pool, "dep-cmd-b", USER_A_ID).await; + + // User B tries to get User A's command detail + let resp = client + .get(format!( + "{}/api/v1/commands/dep-cmd-b/{}", + &app.address, cmd_id + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert!( + resp.status() == 403 || resp.status() == 404, + "User B should NOT see User A's command detail. Got: {}", + resp.status() + ); +} + +#[tokio::test] +async fn test_owner_can_list_own_commands() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let project_id = create_test_project(&app.db_pool, USER_A_ID).await; + let _dep_id = + create_test_deployment(&app.db_pool, USER_A_ID, project_id, "dep-cmd-own").await; + let _cmd_id = insert_test_command(&app.db_pool, "dep-cmd-own", USER_A_ID).await; + + let resp = client + .get(format!( + "{}/api/v1/commands/dep-cmd-own", + &app.address + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert!(resp.status().is_success(), "Owner should list own commands"); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("Expected list field"); + assert!( + !list.is_empty(), + "Owner should see at least one command" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Unauthenticated access should be rejected +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + 
+#[tokio::test] +async fn test_enqueue_rejects_unauthenticated() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let resp = client + .post(format!( + "{}/api/v1/agent/commands/enqueue", + &app.address + )) + // No Authorization header + .json(&serde_json::json!({ + "deployment_hash": "dep-test", + "command_type": "status", + })) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status(), + 401, + "Unauthenticated enqueue should be 401. Got: {}", + resp.status() + ); +} + +#[tokio::test] +async fn test_commands_list_rejects_unauthenticated() { + let Some(app) = spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let resp = client + .get(format!( + "{}/api/v1/commands/some-hash", + &app.address + )) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status(), + 401, + "Unauthenticated command list should be 401. Got: {}", + resp.status() + ); +} diff --git a/tests/security_chat.rs b/tests/security_chat.rs new file mode 100644 index 00000000..7a6407a8 --- /dev/null +++ b/tests/security_chat.rs @@ -0,0 +1,242 @@ +mod common; + +use common::{USER_A_ID, USER_A_TOKEN, USER_B_TOKEN}; + +/// Chat endpoints use (user_id, project_id) as the lookup key. +/// Isolation is enforced server-side: the handler always uses the authenticated +/// user's ID, so User B cannot see or mutate User A's chat history. 
+ +const TEST_PROJECT_ID: i32 = 9999; + +async fn insert_chat(pool: &sqlx::PgPool, user_id: &str, project_id: i32) { + sqlx::query( + "INSERT INTO chat_conversations (id, user_id, project_id, messages) \ + VALUES (gen_random_uuid(), $1, $2, '[{\"role\":\"user\",\"content\":\"hello\"}]'::jsonb)", + ) + .bind(user_id) + .bind(project_id) + .execute(pool) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_list_chats_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // User A saves a chat for a specific project + insert_chat(&app.db_pool, USER_A_ID, TEST_PROJECT_ID).await; + + // User B queries the same project_id → should get 404 (no chat for B) + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 404, + "User B should not see User A's chat history" + ); +} + +#[tokio::test] +async fn test_get_chat_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + insert_chat(&app.db_pool, USER_A_ID, TEST_PROJECT_ID).await; + + // User B GET on the same project_id → 404 + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 404, + "User B GET on User A's chat should return 404" + ); +} + +#[tokio::test] +async fn test_update_chat_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + insert_chat(&app.db_pool, USER_A_ID, TEST_PROJECT_ID).await; + + // User B upserts chat for the same 
project_id. + // This should create a SEPARATE chat for User B, not overwrite User A's. + let resp = client + .put(format!("{}/chat/history", &app.address)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .header("Content-Type", "application/json") + .body( + serde_json::json!({ + "project_id": TEST_PROJECT_ID, + "messages": [{"role": "user", "content": "attacker message"}] + }) + .to_string(), + ) + .send() + .await + .expect("Failed to send request"); + + assert!( + resp.status().is_success(), + "User B upsert should succeed (creates own chat)" + ); + + // Verify User A's chat is untouched + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let messages = body["item"]["messages"] + .as_array() + .expect("messages should be an array"); + assert_eq!(messages[0]["content"], "hello", "User A's chat must remain unchanged"); +} + +#[tokio::test] +async fn test_delete_chat_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + insert_chat(&app.db_pool, USER_A_ID, TEST_PROJECT_ID).await; + + // User B deletes → only deletes B's own (nonexistent) chat + let resp = client + .delete(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + // Should succeed (no-op) but not affect A's data + assert!(resp.status().is_success()); + + // Verify User A's chat still exists + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + 
.expect("Failed to send request"); + + assert!( + resp.status().is_success(), + "User A's chat should survive User B's delete attempt" + ); +} + +#[tokio::test] +async fn test_owner_can_access_own_chat() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // User A creates chat via API + let resp = client + .put(format!("{}/chat/history", &app.address)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .header("Content-Type", "application/json") + .body( + serde_json::json!({ + "project_id": TEST_PROJECT_ID, + "messages": [{"role": "user", "content": "my chat"}] + }) + .to_string(), + ) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should create chat, got {}", + resp.status() + ); + + // User A can GET own chat + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should read own chat, got {}", + resp.status() + ); + + let body: serde_json::Value = resp.json().await.unwrap(); + assert_eq!(body["item"]["user_id"], USER_A_ID); + + // User A can DELETE own chat + let resp = client + .delete(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should delete own chat, got {}", + resp.status() + ); + + // Confirm it's gone + let resp = client + .get(format!( + "{}/chat/history?project_id={}", + &app.address, TEST_PROJECT_ID + )) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert_eq!(resp.status().as_u16(), 404, "Deleted chat should be gone"); 
+} diff --git a/tests/security_cli.rs b/tests/security_cli.rs new file mode 100644 index 00000000..ba992f24 --- /dev/null +++ b/tests/security_cli.rs @@ -0,0 +1,574 @@ +/// Security tests for CLI-facing endpoints. +/// +/// These tests verify that every API endpoint the `stacker` CLI calls +/// is properly scoped to the authenticated user. They exercise the +/// same HTTP paths that `stacker list projects`, `stacker list clouds`, +/// `stacker list servers`, `stacker list deployments`, `stacker deploy`, +/// and `stacker destroy` hit. +/// +/// Each test uses `spawn_app_two_users()` — User A is the owner, +/// User B is the attacker who must be rejected or isolated. +mod common; + +use reqwest::StatusCode; + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Helpers +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +async fn seed_full_deployment( + pool: &sqlx::PgPool, + user_id: &str, +) -> (i32, i32, String) { + let project_id = common::create_test_project(pool, user_id).await; + let hash = format!("dpl-{}", uuid::Uuid::new_v4()); + let deployment_id = + common::create_test_deployment(pool, user_id, project_id, &hash).await; + (project_id, deployment_id, hash) +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker list projects — GET /project +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_list_projects_user_isolation() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + // Seed: User A gets 3, User B gets 1 + for _ in 0..3 { + common::create_test_project(&app.db_pool, common::USER_A_ID).await; + } + common::create_test_project(&app.db_pool, common::USER_B_ID).await; + + // User A sees exactly 3 + let resp = client + .get(format!("{}/project", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + 
.expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 3, "User A should see exactly 3 projects, got {}", list.len()); + + // User B sees exactly 1 + let resp = client + .get(format!("{}/project", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see exactly 1 project, got {}", list.len()); +} + +#[tokio::test] +async fn test_cli_list_projects_unauthenticated() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let resp = client + .get(format!("{}/project", app.address)) + .send() + .await + .expect("request failed"); + + // Should reject without auth + assert!( + resp.status() == StatusCode::UNAUTHORIZED || resp.status() == StatusCode::FORBIDDEN, + "Unauthenticated request to /project should be rejected, got {}", + resp.status() + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker list clouds — GET /cloud +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_list_clouds_user_isolation() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + // Seed: User A has 2 cloud creds, User B has 1 + common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz-1", "htz").await; + common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz-2", "htz").await; + common::create_test_cloud(&app.db_pool, common::USER_B_ID, "b-aws", "aws").await; + + // User A sees 2 + let resp = client + 
.get(format!("{}/cloud", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 2, "User A should see 2 clouds"); + + // User B sees 1 + let resp = client + .get(format!("{}/cloud", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see 1 cloud"); +} + +#[tokio::test] +async fn test_cli_get_cloud_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let cloud_id = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + + // User B tries to read User A's cloud by ID + let resp = client + .get(format!("{}/cloud/{}", app.address, cloud_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's cloud credentials" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker list servers — GET /server +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_list_servers_user_isolation() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + // Servers need a project FK + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let proj_b = common::create_test_project(&app.db_pool, common::USER_B_ID).await; + + 
common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "ready", None).await; + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "ready", None).await; + common::create_test_server(&app.db_pool, common::USER_B_ID, proj_b, "ready", None).await; + + // User A sees 2 + let resp = client + .get(format!("{}/server", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 2, "User A should see 2 servers"); + + // User B sees 1 + let resp = client + .get(format!("{}/server", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see 1 server"); +} + +#[tokio::test] +async fn test_cli_get_server_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let server_id = common::create_test_server( + &app.db_pool, common::USER_A_ID, proj_a, "ready", None, + ).await; + + let resp = client + .get(format!("{}/server/{}", app.address, server_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's server" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker list deployments — GET /api/v1/deployments +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] 
+async fn test_cli_list_deployments_user_isolation() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, _, hash_a) = seed_full_deployment(&app.db_pool, common::USER_A_ID).await; + let (_, _, hash_b) = seed_full_deployment(&app.db_pool, common::USER_B_ID).await; + + // User A sees only their own + let resp = client + .get(format!("{}/api/v1/deployments", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User A should see exactly 1 deployment"); + assert_eq!(list[0]["deployment_hash"].as_str().unwrap(), hash_a); + + // User B sees only their own + let resp = client + .get(format!("{}/api/v1/deployments", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see exactly 1 deployment"); + assert_eq!(list[0]["deployment_hash"].as_str().unwrap(), hash_b); +} + +#[tokio::test] +async fn test_cli_get_deployment_by_hash_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, _, hash_a) = seed_full_deployment(&app.db_pool, common::USER_A_ID).await; + + // User B tries to fetch User A's deployment by hash + let resp = client + .get(format!("{}/api/v1/deployments/hash/{}", app.address, hash_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's 
deployment by hash" + ); +} + +#[tokio::test] +async fn test_cli_get_deployment_by_id_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, did_a, _) = seed_full_deployment(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!("{}/api/v1/deployments/{}", app.address, did_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's deployment by ID" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker deploy — POST /project/{id}/deploy[/{cloud_id}] +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_deploy_cross_user_project_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let project_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + + // User B tries to deploy User A's project + let deploy_body = serde_json::json!({ + "body": "{}", + "docker_compose": "version: '3'\nservices:\n web:\n image: nginx", + }); + + let resp = client + .post(format!("{}/project/{}/deploy", app.address, project_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .json(&deploy_body) + .send() + .await + .unwrap(); + + // Must be rejected — 403 or 404 + assert!( + resp.status() == StatusCode::NOT_FOUND + || resp.status() == StatusCode::FORBIDDEN + || resp.status() == StatusCode::BAD_REQUEST, + "User B must not deploy User A's project (got {})", + resp.status() + ); +} + +#[tokio::test] +async fn test_cli_deploy_with_cross_user_cloud_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + // User B's project + User A's cloud credentials + let 
proj_b = common::create_test_project(&app.db_pool, common::USER_B_ID).await; + let cloud_a = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + + let deploy_body = serde_json::json!({ + "body": "{}", + "docker_compose": "version: '3'\nservices:\n web:\n image: nginx", + }); + + // User B tries to deploy their project using User A's cloud creds + let resp = client + .post(format!("{}/project/{}/deploy/{}", app.address, proj_b, cloud_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .json(&deploy_body) + .send() + .await + .unwrap(); + + // Must be rejected + assert!( + resp.status() == StatusCode::NOT_FOUND + || resp.status() == StatusCode::FORBIDDEN + || resp.status() == StatusCode::BAD_REQUEST, + "User B must not use User A's cloud credentials for deploy (got {})", + resp.status() + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker destroy — POST /api/v1/deployments/{id}/force-complete +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_destroy_cross_user_deployment_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, did_a, _) = seed_full_deployment(&app.db_pool, common::USER_A_ID).await; + + // User B tries to force-complete (destroy) User A's deployment + let resp = client + .post(format!( + "{}/api/v1/deployments/{}/force-complete?force=true", + app.address, did_a + )) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not destroy User A's deployment" + ); +} + +#[tokio::test] +async fn test_cli_destroy_own_deployment_allowed() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, did_a, _) = seed_full_deployment(&app.db_pool, 
common::USER_A_ID).await; + + // User A can force-complete their own deployment + let resp = client + .post(format!( + "{}/api/v1/deployments/{}/force-complete?force=true", + app.address, did_a + )) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .unwrap(); + + assert!( + resp.status().is_success(), + "Owner should be able to destroy their own deployment (got {})", + resp.status() + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker deploy — enqueue agent command on other user's deployment +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_enqueue_command_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_, _, hash_a) = seed_full_deployment(&app.db_pool, common::USER_A_ID).await; + + let cmd = serde_json::json!({ + "deployment_hash": hash_a, + "command_type": "health_check", + "parameters": {}, + }); + + // User B tries to enqueue a command on User A's deployment + let resp = client + .post(format!("{}/api/v1/agent/commands/enqueue", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .json(&cmd) + .send() + .await + .unwrap(); + + assert!( + resp.status() == StatusCode::NOT_FOUND + || resp.status() == StatusCode::FORBIDDEN + || resp.status() == StatusCode::BAD_REQUEST, + "User B must not enqueue commands on User A's deployment (got {})", + resp.status() + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker delete project — DELETE /project/{id} +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_delete_project_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let proj_a = 
common::create_test_project(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .delete(format!("{}/project/{}", app.address, proj_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not delete User A's project" + ); + + // Verify project still exists — User A can still fetch it + let resp = client + .get(format!("{}/project/{}", app.address, proj_a)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .unwrap(); + assert!( + resp.status().is_success(), + "Project should still exist after cross-user delete attempt" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker delete cloud — DELETE /cloud/{id} +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_delete_cloud_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let cloud_a = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + + let resp = client + .delete(format!("{}/cloud/{}", app.address, cloud_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not delete User A's cloud credentials" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// stacker delete server — DELETE /server/{id} +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_delete_server_cross_user_rejected() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let server_a = 
common::create_test_server( + &app.db_pool, common::USER_A_ID, proj_a, "ready", None, + ).await; + + let resp = client + .delete(format!("{}/server/{}", app.address, server_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .unwrap(); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not delete User A's server" + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Unauthenticated access denied on all CLI endpoints +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[tokio::test] +async fn test_cli_endpoints_reject_unauthenticated() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let endpoints = vec![ + ("GET", format!("{}/project", app.address)), + ("GET", format!("{}/cloud", app.address)), + ("GET", format!("{}/server", app.address)), + ("GET", format!("{}/api/v1/deployments", app.address)), + ]; + + for (method, url) in endpoints { + let resp = match method { + "GET" => client.get(&url).send().await.unwrap(), + _ => unreachable!(), + }; + + let status = resp.status(); + assert!( + status == StatusCode::UNAUTHORIZED || status == StatusCode::FORBIDDEN, + "{} {} should reject unauthenticated (got {})", + method, url, status + ); + } +} diff --git a/tests/security_client.rs b/tests/security_client.rs new file mode 100644 index 00000000..9cc26eaf --- /dev/null +++ b/tests/security_client.rs @@ -0,0 +1,173 @@ +mod common; + +use common::{USER_A_ID, USER_A_TOKEN, USER_B_TOKEN}; +use sqlx::Row; + +/// User A creates a client. User B tries to update/enable/disable → rejected (400). +/// Verifies cross-user data isolation on client endpoints. 
+ +async fn insert_client(pool: &sqlx::PgPool, user_id: &str) -> i32 { + let rec = sqlx::query( + "INSERT INTO client (user_id, title, secret, enabled) \ + VALUES ($1, 'test-client', 'secret123', true) RETURNING id", + ) + .bind(user_id) + .fetch_one(pool) + .await + .unwrap(); + rec.get("id") +} + +#[tokio::test] +async fn test_list_clients_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // User A owns a client + let client_id = insert_client(&app.db_pool, USER_A_ID).await; + assert!(client_id > 0); + + // User B tries to access User A's client via update (no list endpoint exists). + // This confirms B cannot interact with A's client at all. + let resp = client + .put(format!("{}/client/{}", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert!( + !resp.status().is_success(), + "User B should not be able to access User A's client" + ); +} + +#[tokio::test] +async fn test_update_client_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let client_id = insert_client(&app.db_pool, USER_A_ID).await; + + let resp = client + .put(format!("{}/client/{}", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + // Handler returns 400 Bad Request for non-owner + assert_eq!( + resp.status().as_u16(), + 400, + "User B updating User A's client should return 400" + ); +} + +#[tokio::test] +async fn test_enable_client_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + // Create a disabled client (secret = NULL) for User A + let rec = sqlx::query( + "INSERT INTO client (user_id, secret, enabled) \ + VALUES ($1, NULL, 
false) RETURNING id", + ) + .bind(USER_A_ID) + .fetch_one(&app.db_pool) + .await + .unwrap(); + let client_id: i32 = rec.get("id"); + + let resp = client + .put(format!("{}/client/{}/enable", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 400, + "User B enabling User A's client should return 400" + ); +} + +#[tokio::test] +async fn test_disable_client_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let client_id = insert_client(&app.db_pool, USER_A_ID).await; + + let resp = client + .put(format!("{}/client/{}/disable", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 400, + "User B disabling User A's client should return 400" + ); +} + +#[tokio::test] +async fn test_owner_can_manage_own_client() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let client_id = insert_client(&app.db_pool, USER_A_ID).await; + + // Owner can update (regenerate secret) + let resp = client + .put(format!("{}/client/{}", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should be able to update own client, got {}", + resp.status() + ); + + // Owner can disable + let resp = client + .put(format!("{}/client/{}/disable", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should be able to disable own client, got {}", + resp.status() + ); + + // Owner can re-enable + 
let resp = client + .put(format!("{}/client/{}/enable", &app.address, client_id)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + assert!( + resp.status().is_success(), + "Owner should be able to enable own client, got {}", + resp.status() + ); +} diff --git a/tests/security_cloud.rs b/tests/security_cloud.rs new file mode 100644 index 00000000..7848a2c3 --- /dev/null +++ b/tests/security_cloud.rs @@ -0,0 +1,138 @@ +mod common; + +/// IDOR security tests for /cloud endpoints. +/// Verify that User B cannot list, read, update, or delete User A's cloud credentials. + +#[tokio::test] +async fn test_list_clouds_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + // User A creates 2 clouds, User B creates 1 + let _ca1 = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + let _ca2 = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-aws", "aws").await; + let _cb1 = common::create_test_cloud(&app.db_pool, common::USER_B_ID, "b-do", "digitalocean").await; + + let client = reqwest::Client::new(); + + // User A lists → sees exactly 2 + let resp = client + .get(&format!("{}/cloud", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 2, "User A should see exactly 2 clouds"); + + // User B lists → sees exactly 1 + let resp = client + .get(&format!("{}/cloud", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + 
assert_eq!(list.len(), 1, "User B should see exactly 1 cloud"); +} + +#[tokio::test] +async fn test_get_cloud_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let cloud_id = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + let client = reqwest::Client::new(); + + // User B tries to GET User A's cloud → 404 + let resp = client + .get(&format!("{}/cloud/{}", &app.address, cloud_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::NOT_FOUND, + "User B must not read User A's cloud" + ); +} + +#[tokio::test] +async fn test_update_cloud_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let cloud_id = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + let client = reqwest::Client::new(); + + // User B tries to PUT User A's cloud → 400 (bad_request = IDOR guard) + let resp = client + .put(&format!("{}/cloud/{}", &app.address, cloud_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .header("Content-Type", "application/json") + .body(r#"{"provider":"htz","cloud_token":"stolen","save_token":true}"#) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::BAD_REQUEST, + "User B must not update User A's cloud" + ); +} + +#[tokio::test] +async fn test_delete_cloud_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let cloud_id = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + let client = reqwest::Client::new(); + + // User B tries to DELETE User A's cloud → 400 (bad_request = IDOR guard) + let resp = client + .delete(&format!("{}/cloud/{}", &app.address, cloud_id)) + .header("Authorization", format!("Bearer {}", 
common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::BAD_REQUEST, + "User B must not delete User A's cloud" + ); +} + +#[tokio::test] +async fn test_owner_can_access_own_cloud() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let cloud_id = common::create_test_cloud(&app.db_pool, common::USER_A_ID, "a-htz", "htz").await; + let client = reqwest::Client::new(); + + // User A GETs own cloud → 200 + let resp = client + .get(&format!("{}/cloud/{}", &app.address, cloud_id)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::OK, + "Owner must be able to read own cloud" + ); + let body: serde_json::Value = resp.json().await.unwrap(); + assert!(body["item"].is_object(), "expected item object in response"); +} diff --git a/tests/security_commands.rs b/tests/security_commands.rs new file mode 100644 index 00000000..cc308495 --- /dev/null +++ b/tests/security_commands.rs @@ -0,0 +1,136 @@ +/// IDOR security tests for command endpoints. +/// +/// Commands are scoped to a deployment_hash. These tests verify that User B +/// cannot read commands belonging to User A's deployments. +mod common; + +use reqwest::StatusCode; + +/// Seed a deployment and insert a command for the given user. +/// Returns (deployment_hash, command_id). 
+async fn seed_deployment_with_command( + pool: &sqlx::PgPool, + user_id: &str, +) -> (String, String) { + let project_id = common::create_test_project(pool, user_id).await; + let hash = format!("dpl-{}", uuid::Uuid::new_v4()); + let _deployment_id = + common::create_test_deployment(pool, user_id, project_id, &hash).await; + + let command_id = format!("cmd-{}", uuid::Uuid::new_v4()); + sqlx::query( + "INSERT INTO commands (command_id, deployment_hash, type, status, parameters, created_by, created_at) + VALUES ($1, $2, $3, 'queued', '{}'::jsonb, $4, NOW())", + ) + .bind(&command_id) + .bind(&hash) + .bind("status") + .bind(user_id) + .execute(pool) + .await + .expect("Failed to insert test command"); + + (hash, command_id) +} + +// ── KNOWN VULNERABLE: list commands leaks across users ────────────────── + +/// User B should NOT see commands for User A's deployment. +/// Currently the endpoint performs no ownership check on the deployment. +#[tokio::test] +async fn test_list_commands_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (hash_a, _cmd_id) = seed_deployment_with_command(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!("{}/api/v1/commands/{}", app.address, hash_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + // After fix this should be 404 or an empty list + let status = resp.status(); + let body: serde_json::Value = resp.json().await.unwrap(); + + if status == StatusCode::OK { + let list = body["list"].as_array().expect("list should be an array"); + assert!( + list.is_empty(), + "User B should not see User A's commands (got {} items)", + list.len() + ); + } else { + assert_eq!(status, StatusCode::NOT_FOUND); + } +} + +// ── KNOWN VULNERABLE: get command detail leaks across users ───────────── + +/// User B should NOT be able to fetch a specific command from User 
A's deployment. +#[tokio::test] +async fn test_get_command_detail_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (hash_a, cmd_id) = seed_deployment_with_command(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!( + "{}/api/v1/commands/{}/{}", + app.address, hash_a, cmd_id + )) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not read User A's command detail" + ); +} + +// ── Positive: owner can list own commands ──────────────────────────────── + +#[tokio::test] +async fn test_owner_can_list_own_commands() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (hash_a, cmd_id) = seed_deployment_with_command(&app.db_pool, common::USER_A_ID).await; + + // List + let resp = client + .get(format!("{}/api/v1/commands/{}", app.address, hash_a)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("list should be an array"); + assert!( + list.iter().any(|c| c["command_id"].as_str() == Some(&cmd_id)), + "Owner should see their own command in the list" + ); + + // Detail + let resp = client + .get(format!( + "{}/api/v1/commands/{}/{}", + app.address, hash_a, cmd_id + )) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + assert_eq!(body["item"]["command_id"].as_str(), Some(cmd_id.as_str())); +} diff --git a/tests/security_deployment.rs b/tests/security_deployment.rs 
new file mode 100644 index 00000000..66274a31 --- /dev/null +++ b/tests/security_deployment.rs @@ -0,0 +1,156 @@ +/// IDOR security tests for deployment endpoints. +/// +/// Verify that deployments are isolated per user — User B cannot read User A's data. +mod common; + +use reqwest::StatusCode; + +/// Helper: create a project + deployment for the given user, return (project_id, deployment_id, hash). +async fn seed_deployment( + pool: &sqlx::PgPool, + user_id: &str, +) -> (i32, i32, String) { + let project_id = common::create_test_project(pool, user_id).await; + let hash = format!("dpl-{}", uuid::Uuid::new_v4()); + let deployment_id = + common::create_test_deployment(pool, user_id, project_id, &hash).await; + (project_id, deployment_id, hash) +} + +// ── List ──────────────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_list_deployments_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + // Seed one deployment per user + let (_pid_a, _did_a, _hash_a) = seed_deployment(&app.db_pool, common::USER_A_ID).await; + let (_pid_b, _did_b, _hash_b) = seed_deployment(&app.db_pool, common::USER_B_ID).await; + + // User A lists — should see only their own + let resp = client + .get(format!("{}/api/v1/deployments", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("list should be an array"); + assert_eq!(list.len(), 1, "User A should see exactly 1 deployment"); + assert_eq!(list[0]["deployment_hash"].as_str().unwrap(), _hash_a); + + // User B lists — should see only their own + let resp = client + .get(format!("{}/api/v1/deployments", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + 
.await + .expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("list should be an array"); + assert_eq!(list.len(), 1, "User B should see exactly 1 deployment"); + assert_eq!(list[0]["deployment_hash"].as_str().unwrap(), _hash_b); +} + +// ── Get by ID ─────────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_get_deployment_by_id_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_pid, did, _hash) = seed_deployment(&app.db_pool, common::USER_A_ID).await; + + // User B tries to access User A's deployment by ID + let resp = client + .get(format!("{}/api/v1/deployments/{}", app.address, did)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's deployment by ID" + ); +} + +// ── Get by hash ───────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_get_deployment_by_hash_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_pid, _did, hash) = seed_deployment(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!("{}/api/v1/deployments/hash/{}", app.address, hash)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's deployment by hash" + ); +} + +// ── Get by project ────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_get_deployment_by_project_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + 
let client = reqwest::Client::new(); + + let (pid, _did, _hash) = seed_deployment(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!("{}/api/v1/deployments/project/{}", app.address, pid)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not access User A's deployment by project" + ); +} + +// ── Positive: owner can access own ────────────────────────────────────── + +#[tokio::test] +async fn test_owner_can_access_own_deployment() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_pid, did, hash) = seed_deployment(&app.db_pool, common::USER_A_ID).await; + + // By ID + let resp = client + .get(format!("{}/api/v1/deployments/{}", app.address, did)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + assert_eq!(body["item"]["deployment_hash"].as_str().unwrap(), hash); + + // By hash + let resp = client + .get(format!("{}/api/v1/deployments/hash/{}", app.address, hash)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!(resp.status(), StatusCode::OK); +} diff --git a/tests/security_pipes.rs b/tests/security_pipes.rs new file mode 100644 index 00000000..d0c3afdf --- /dev/null +++ b/tests/security_pipes.rs @@ -0,0 +1,218 @@ +/// IDOR security tests for pipe template and pipe instance endpoints. +/// +/// Pipe templates have `is_public` and `created_by` columns. +/// Pipe instances are tied to a deployment_hash and `created_by`. +/// These tests verify that private data is not leaked across users. 
+mod common;
+
+use reqwest::StatusCode;
+use sqlx::Row;
+
+/// Insert a private pipe template for the given user. Returns its UUID.
+async fn seed_pipe_template(pool: &sqlx::PgPool, user_id: &str) -> uuid::Uuid {
+    let name = format!("test-tmpl-{}", uuid::Uuid::new_v4());
+    let row = sqlx::query(
+        "INSERT INTO pipe_templates (name, source_app_type, source_endpoint, target_app_type, target_endpoint, field_mapping, is_public, created_by)
+         VALUES ($1, 'app-a', '{\"path\":\"/api\"}'::jsonb, 'app-b', '{\"path\":\"/api\"}'::jsonb, '{}'::jsonb, false, $2)
+         RETURNING id",
+    )
+    .bind(&name)
+    .bind(user_id)
+    .fetch_one(pool)
+    .await
+    .expect("Failed to insert test pipe template");
+
+    row.get::<uuid::Uuid, _>("id")
+}
+
+/// Seed a deployment + pipe instance for the given user. Returns (deployment_hash, instance_id).
+async fn seed_pipe_instance(
+    pool: &sqlx::PgPool,
+    user_id: &str,
+) -> (String, uuid::Uuid) {
+    let project_id = common::create_test_project(pool, user_id).await;
+    let hash = format!("dpl-{}", uuid::Uuid::new_v4());
+    let _did = common::create_test_deployment(pool, user_id, project_id, &hash).await;
+
+    let row = sqlx::query(
+        "INSERT INTO pipe_instances (deployment_hash, source_container, status, created_by)
+         VALUES ($1, 'my-app', 'active', $2)
+         RETURNING id",
+    )
+    .bind(&hash)
+    .bind(user_id)
+    .fetch_one(pool)
+    .await
+    .expect("Failed to insert test pipe instance");
+
+    let instance_id = row.get::<uuid::Uuid, _>("id");
+    (hash, instance_id)
+}
+
+// ── KNOWN VULNERABLE: list templates returns all (no user filter) ───────
+
+/// User B should NOT see User A's private templates.
+/// Currently the endpoint returns all templates regardless of ownership.
+#[tokio::test] +async fn test_list_pipe_templates_leaks_all() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let tmpl_id = seed_pipe_template(&app.db_pool, common::USER_A_ID).await; + + // User B lists templates (not requesting public_only) + let resp = client + .get(format!("{}/api/v1/pipes/templates", app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("list should be an array"); + + let ids: Vec<&str> = list + .iter() + .filter_map(|t| t["id"].as_str()) + .collect(); + + assert!( + !ids.contains(&tmpl_id.to_string().as_str()), + "User B should not see User A's private template {} in the list (found {} templates)", + tmpl_id, + list.len() + ); +} + +// ── KNOWN VULNERABLE: get template ignores _user ──────────────────────── + +/// User B should NOT be able to fetch User A's private template by ID. +#[tokio::test] +async fn test_get_pipe_template_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let tmpl_id = seed_pipe_template(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!( + "{}/api/v1/pipes/templates/{}", + app.address, tmpl_id + )) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not read User A's private pipe template" + ); +} + +// ── KNOWN VULNERABLE: list instances has no user check ────────────────── + +/// User B should NOT see pipe instances for User A's deployment. 
+#[tokio::test] +async fn test_list_pipe_instances_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (hash_a, _inst_id) = seed_pipe_instance(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!( + "{}/api/v1/pipes/instances/{}", + app.address, hash_a + )) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + let status = resp.status(); + let body: serde_json::Value = resp.json().await.unwrap(); + + if status == StatusCode::OK { + let list = body["list"].as_array().expect("list should be an array"); + assert!( + list.is_empty(), + "User B should not see User A's pipe instances (got {} items)", + list.len() + ); + } else { + assert_eq!(status, StatusCode::NOT_FOUND); + } +} + +// ── KNOWN VULNERABLE: get instance ignores _user ──────────────────────── + +/// User B should NOT be able to fetch User A's pipe instance by ID. 
+#[tokio::test] +async fn test_get_pipe_instance_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (_hash_a, inst_id) = seed_pipe_instance(&app.db_pool, common::USER_A_ID).await; + + let resp = client + .get(format!( + "{}/api/v1/pipes/instances/detail/{}", + app.address, inst_id + )) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!( + resp.status(), + StatusCode::NOT_FOUND, + "User B must not read User A's pipe instance" + ); +} + +// ── Positive: owner can list own pipe instances ───────────────────────── + +#[tokio::test] +async fn test_owner_can_list_own_pipe_instances() { + let Some(app) = common::spawn_app_two_users().await else { return }; + let client = reqwest::Client::new(); + + let (hash_a, inst_id) = seed_pipe_instance(&app.db_pool, common::USER_A_ID).await; + + // List + let resp = client + .get(format!( + "{}/api/v1/pipes/instances/{}", + app.address, hash_a + )) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!(resp.status(), StatusCode::OK); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("list should be an array"); + assert!( + list.iter() + .any(|i| i["id"].as_str() == Some(&inst_id.to_string())), + "Owner should see their own pipe instance in the list" + ); + + // Detail + let resp = client + .get(format!( + "{}/api/v1/pipes/instances/detail/{}", + app.address, inst_id + )) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + + assert_eq!(resp.status(), StatusCode::OK); +} diff --git a/tests/security_project.rs b/tests/security_project.rs new file mode 100644 index 00000000..a0a1c486 --- /dev/null +++ b/tests/security_project.rs @@ -0,0 +1,138 @@ +mod common; + +/// 
IDOR security tests for /project endpoints. +/// Verify that User B cannot list, read, update, or delete User A's projects. + +#[tokio::test] +async fn test_list_projects_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + // User A creates 2 projects, User B creates 1 + let _pa1 = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let _pa2 = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let _pb1 = common::create_test_project(&app.db_pool, common::USER_B_ID).await; + + let client = reqwest::Client::new(); + + // User A lists → sees exactly 2 + let resp = client + .get(&format!("{}/project", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 2, "User A should see exactly 2 projects"); + + // User B lists → sees exactly 1 + let resp = client + .get(&format!("{}/project", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see exactly 1 project"); +} + +#[tokio::test] +async fn test_get_project_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let project_id = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let client = reqwest::Client::new(); + + // User B tries to GET User A's project → 404 + let resp = client + .get(&format!("{}/project/{}", &app.address, project_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await 
+ .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::NOT_FOUND, + "User B must not read User A's project" + ); +} + +#[tokio::test] +async fn test_update_project_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let project_id = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let client = reqwest::Client::new(); + + // User B tries to PUT User A's project → 400 (bad_request = IDOR guard) + let resp = client + .put(&format!("{}/project/{}", &app.address, project_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .header("Content-Type", "application/json") + .body(r#"{"custom_stack_code":"hijacked","commonDomain":"test.com","dockerhub_user":"x","dockerhub_password":"x","apps":[]}"#) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::BAD_REQUEST, + "User B must not update User A's project" + ); +} + +#[tokio::test] +async fn test_delete_project_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let project_id = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let client = reqwest::Client::new(); + + // User B tries to DELETE User A's project → 400 (bad_request = IDOR guard) + let resp = client + .delete(&format!("{}/project/{}", &app.address, project_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::BAD_REQUEST, + "User B must not delete User A's project" + ); +} + +#[tokio::test] +async fn test_owner_can_access_own_project() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let project_id = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let client = reqwest::Client::new(); + + // User A GETs own project → 200 + let resp = client + 
.get(&format!("{}/project/{}", &app.address, project_id)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::OK, + "Owner must be able to read own project" + ); + let body: serde_json::Value = resp.json().await.unwrap(); + assert!(body["item"].is_object(), "expected item object in response"); +} diff --git a/tests/security_rating.rs b/tests/security_rating.rs new file mode 100644 index 00000000..eb1b2d73 --- /dev/null +++ b/tests/security_rating.rs @@ -0,0 +1,116 @@ +mod common; + +use common::{USER_A_ID, USER_A_TOKEN, USER_B_TOKEN}; +use sqlx::Row; + +/// Rating edit/delete endpoints check `rating.user_id == user.id`. +/// Non-owner attempts return 404 (the handler treats missing-or-not-owned as "not found"). + +async fn insert_rating(pool: &sqlx::PgPool, user_id: &str) -> i32 { + let rec = sqlx::query( + "INSERT INTO rating (user_id, obj_id, rating, comment, category) \ + VALUES ($1, 1, 5, 'great', 'Application') RETURNING id", + ) + .bind(user_id) + .fetch_one(pool) + .await + .unwrap(); + rec.get("id") +} + +#[tokio::test] +async fn test_edit_rating_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let rating_id = insert_rating(&app.db_pool, USER_A_ID).await; + + let resp = client + .put(format!("{}/rating/{}", &app.address, rating_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .header("Content-Type", "application/json") + .body(serde_json::json!({"comment": "hacked", "rate": 1}).to_string()) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 404, + "User B editing User A's rating should return 404" + ); +} + +#[tokio::test] +async fn test_delete_rating_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = 
reqwest::Client::new(); + + let rating_id = insert_rating(&app.db_pool, USER_A_ID).await; + + let resp = client + .delete(format!("{}/rating/{}", &app.address, rating_id)) + .header("Authorization", format!("Bearer {}", USER_B_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert_eq!( + resp.status().as_u16(), + 404, + "User B deleting User A's rating should return 404" + ); + + // Verify the rating is still intact + let count: i64 = + sqlx::query_scalar("SELECT COUNT(*) FROM rating WHERE id = $1 AND hidden = false") + .bind(rating_id) + .fetch_one(&app.db_pool) + .await + .unwrap(); + assert_eq!(count, 1, "Rating should not be deleted by non-owner"); +} + +#[tokio::test] +async fn test_owner_can_edit_own_rating() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + let client = reqwest::Client::new(); + + let rating_id = insert_rating(&app.db_pool, USER_A_ID).await; + + // Owner edits the rating + let resp = client + .put(format!("{}/rating/{}", &app.address, rating_id)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .header("Content-Type", "application/json") + .body(serde_json::json!({"comment": "updated comment", "rate": 8}).to_string()) + .send() + .await + .expect("Failed to send request"); + + assert!( + resp.status().is_success(), + "Owner should edit own rating, got {}", + resp.status() + ); + + // Owner deletes (soft-delete) the rating + let resp = client + .delete(format!("{}/rating/{}", &app.address, rating_id)) + .header("Authorization", format!("Bearer {}", USER_A_TOKEN)) + .send() + .await + .expect("Failed to send request"); + + assert!( + resp.status().is_success(), + "Owner should delete own rating, got {}", + resp.status() + ); +} diff --git a/tests/security_server.rs b/tests/security_server.rs new file mode 100644 index 00000000..c0a56acd --- /dev/null +++ b/tests/security_server.rs @@ -0,0 +1,155 @@ +mod common; + +/// IDOR security tests for /server endpoints. 
+/// Verify that User B cannot list, read, or delete User A's servers. + +#[tokio::test] +async fn test_list_servers_only_returns_own() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + // Each user needs a project (FK constraint: server.project_id → project.id) + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let proj_b = common::create_test_project(&app.db_pool, common::USER_B_ID).await; + + // User A: 2 servers, User B: 1 server + let _sa1 = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + let _sa2 = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + let _sb1 = + common::create_test_server(&app.db_pool, common::USER_B_ID, proj_b, "none", None).await; + + let client = reqwest::Client::new(); + + // User A lists → sees exactly 2 + let resp = client + .get(&format!("{}/server", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 2, "User A should see exactly 2 servers"); + + // User B lists → sees exactly 1 + let resp = client + .get(&format!("{}/server", &app.address)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert!(resp.status().is_success()); + let body: serde_json::Value = resp.json().await.unwrap(); + let list = body["list"].as_array().expect("expected list"); + assert_eq!(list.len(), 1, "User B should see exactly 1 server"); +} + +#[tokio::test] +async fn test_get_server_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let 
server_id = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + + let client = reqwest::Client::new(); + + // User B tries to GET User A's server → 404 + let resp = client + .get(&format!("{}/server/{}", &app.address, server_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::NOT_FOUND, + "User B must not read User A's server" + ); +} + +#[tokio::test] +async fn test_get_server_by_project_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let _server_id = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + + let client = reqwest::Client::new(); + + // User B tries to GET servers by User A's project → 404 + let resp = client + .get(&format!("{}/server/project/{}", &app.address, proj_a)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::NOT_FOUND, + "User B must not list servers by User A's project" + ); +} + +#[tokio::test] +async fn test_delete_server_rejects_other_user() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let server_id = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + + let client = reqwest::Client::new(); + + // User B tries to DELETE User A's server → 400 (bad_request = IDOR guard) + let resp = client + .delete(&format!("{}/server/{}", &app.address, server_id)) + .header("Authorization", format!("Bearer {}", common::USER_B_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + 
reqwest::StatusCode::BAD_REQUEST, + "User B must not delete User A's server" + ); +} + +#[tokio::test] +async fn test_owner_can_access_own_server() { + let Some(app) = common::spawn_app_two_users().await else { + return; + }; + + let proj_a = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let server_id = + common::create_test_server(&app.db_pool, common::USER_A_ID, proj_a, "none", None).await; + + let client = reqwest::Client::new(); + + // User A GETs own server → 200 + let resp = client + .get(&format!("{}/server/{}", &app.address, server_id)) + .header("Authorization", format!("Bearer {}", common::USER_A_TOKEN)) + .send() + .await + .expect("request failed"); + assert_eq!( + resp.status(), + reqwest::StatusCode::OK, + "Owner must be able to read own server" + ); + let body: serde_json::Value = resp.json().await.unwrap(); + assert!(body["item"].is_object(), "expected item object in response"); +}