diff --git a/.github/workflows/DeployManual.yml b/.github/workflows/DeployManual.yml index f0eea48..4fd9c34 100644 --- a/.github/workflows/DeployManual.yml +++ b/.github/workflows/DeployManual.yml @@ -1,14 +1,17 @@ -name: Publish to NPM +name: Publish to NPM and MCP Registry on: release: types: [published] workflow_dispatch: inputs: tag: - description: "tag name e.g. beta, latest etc." + description: 'tag name e.g. beta, latest etc.' jobs: build: runs-on: ubuntu-latest + permissions: + id-token: write + contents: read steps: - name: Checkout uses: actions/checkout@v4 @@ -16,8 +19,8 @@ jobs: uses: actions/setup-node@v4 with: node-version: 20 - registry-url: "https://registry.npmjs.org" - scope: "@provartesting" + registry-url: 'https://registry.npmjs.org' + scope: '@provartesting' - name: Install dependencies and build run: | npm install -g @salesforce/cli @@ -32,3 +35,15 @@ jobs: env: TAG: ${{ github.event.inputs.tag || 'latest' }} NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + - name: Install mcp-publisher + run: | + curl -L "https://github.com/modelcontextprotocol/registry/releases/latest/download/mcp-publisher_$(uname -s | tr '[:upper:]' '[:lower:]')_$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/').tar.gz" | tar xz mcp-publisher + - name: Sync version into server.json + if: github.event_name == 'release' + run: | + VERSION=${GITHUB_REF#refs/tags/v} + jq --arg v "$VERSION" '.version = $v | .packages[0].version = $v' server.json > server.tmp && mv server.tmp server.json + - name: Authenticate to MCP Registry + run: ./mcp-publisher login github-oidc + - name: Publish to MCP Registry + run: ./mcp-publisher publish diff --git a/docs/mcp-pilot-guide.md b/docs/mcp-pilot-guide.md index 2ca3fc7..bc7283e 100644 --- a/docs/mcp-pilot-guide.md +++ b/docs/mcp-pilot-guide.md @@ -371,7 +371,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba ### Scenario 9 (requires API key): AI Test Generation from User Story -**Goal:** Demonstrate the full 
Phase 2 AI-assisted test generation loop: org metadata → corpus retrieval → LLM synthesis → generate + validate. +**Goal:** Demonstrate the full AI-assisted test generation loop: org metadata → corpus retrieval → LLM synthesis → generate + validate. **Setup:** diff --git a/docs/mcp.md b/docs/mcp.md index cfa5685..fa3dce8 100644 --- a/docs/mcp.md +++ b/docs/mcp.md @@ -18,6 +18,7 @@ The Provar DX CLI ships with a built-in **Model Context Protocol (MCP) server** - [Available tools](#available-tools) - [provardx.ping](#provardxping) - [provar.project.inspect](#provarprojectinspect) + - [provar.connection.list](#provarconnectionlist) - [provar.pageobject.generate](#provarpageobjectgenerate) - [provar.pageobject.validate](#provarpageobjectvalidate) - [provar.testcase.generate](#provartestcasegenerate) @@ -45,6 +46,7 @@ The Provar DX CLI ships with a built-in **Model Context Protocol (MCP) server** - [provar.qualityhub.defect.create](#provarqualityhubdefectcreate) - [provar.testrun.report.locate](#provartestrunreportlocate) - [provar.testrun.rca](#provartestrunrca) + - [provar.testcase.step.edit](#provartestcasestepedit) - [provar.testplan.add-instance](#provartestplanadinstance) - [provar.testplan.create-suite](#provartestplancreatetsuite) - [provar.testplan.remove-instance](#provartestplanremoveinstance) @@ -529,27 +531,71 @@ Provar test plans live in `plans/`. Each plan is a directory containing a `.plan --- +### `provar.connection.list` + +Lists all connections and named environments defined in the project's `.testproject` file. Use this **before** generating test cases or page objects to discover the exact connection names to use. + +**Prerequisite:** the project must have a `.testproject` file. Run `provar.project.validate` first if unsure of the project root. + +**Security:** only connection names, types, and URLs are returned — credential values from `.secrets` are never included in the output. 
+ +**Input** + +| Parameter | Type | Required | Description | +| -------------- | ------ | -------- | ----------------------------------------------------------------- | +| `project_path` | string | yes | Absolute path to the Provar project root (within `allowed-paths`) | + +**Output** + +```json +{ + "connections": [ + { "name": "MyOrg", "type": "Salesforce", "url": "sfdc://...", "sso_configured": false }, + { "name": "OktaSso", "type": "SSO", "url": "sso://...", "sso_configured": true } + ], + "environments": [ + { "name": "QA", "connection": "MyOrg", "url": "https://qa.example.com" }, + { "name": "UAT", "connection": "AdminOrg" } + ], + "summary": { "connection_count": 2, "environment_count": 2 } +} +``` + +Connection `type` values: `Salesforce`, `Web`, `Quality Hub`, `Web Service`, `Database`, `Google`, `Microsoft`, `Zephyr`, `SSO`, or the raw class name for unknown types. + +**Error codes** + +| Code | Meaning | +| --------------------------- | ------------------------------------------------------------------------- | +| `CONNECTION_FILE_NOT_FOUND` | No `.testproject` at the given path. Run `provar.project.validate` first. | +| `PATH_NOT_ALLOWED` | `project_path` is outside the server's `--allowed-paths` | + +--- + ### `provar.pageobject.generate` -Generates a Java Page Object skeleton with the correct `@Page` or `@SalesforcePage` annotation and `@FindBy` field stubs. +Generates a Java Page Object skeleton with the correct `@Page` or `@SalesforcePage` annotation and `@FindBy` field stubs. Optionally generates an `ILoginPage` implementation stub for non-SF SSO connections. **Input** -| Parameter | Type | Required | Description | -| --------------------------- | ------------------------------------------------------ | -------- | -------------------------------------------------------- | -| `class_name` | string | yes | PascalCase Java class name (e.g. `AccountDetailPage`) | -| `package_name` | string | yes | Java package (e.g. 
`pageobjects.accounts`) | -| `page_type` | `standard` \| `salesforce` | yes | Generates `@Page` or `@SalesforcePage` annotation | -| `title` | string | no | Page title for the annotation | -| `connection_name` | string | no | Salesforce connection name (for `@SalesforcePage`) | -| `salesforce_page_attribute` | string | no | Additional Salesforce page attribute | -| `fields` | array of `{ name, type, locator_type, locator_value }` | no | WebElement field definitions | -| `output_path` | string | no | Full file path to write (must be within `allowed-paths`) | -| `overwrite` | boolean | no | Overwrite existing file (default: `false`) | -| `dry_run` | boolean | no | Return content without writing to disk | -| `idempotency_key` | string | no | Prevents duplicate generation for the same key | +| Parameter | Type | Required | Description | +| --------------------------- | ------------------------------------------------------------------ | -------- | ------------------------------------------------------------------------------------------------------- | +| `class_name` | string | yes | PascalCase Java class name (e.g. `AccountDetailPage`) | +| `package_name` | string | yes | Java package (e.g. `pageobjects.accounts`) | +| `page_type` | `standard` \| `salesforce` | yes | Generates `@Page` or `@SalesforcePage` annotation | +| `title` | string | no | Page title for the annotation | +| `connection_name` | string | no | Salesforce connection name (for `@SalesforcePage`) | +| `salesforce_page_attribute` | string | no | Additional Salesforce page attribute | +| `fields` | array of `{ name, element_type, locator_strategy, locator_value }` | no | WebElement field definitions | +| `sso_class` | string | no | PascalCase class name for an `ILoginPage` stub (non-SF SSO). Written alongside the page object on disk. 
| +| `output_path` | string | no | Full file path to write (must be within `allowed-paths`) | +| `overwrite` | boolean | no | Overwrite existing file (default: `false`) | +| `dry_run` | boolean | no | Return content without writing to disk | +| `idempotency_key` | string | no | Prevents duplicate generation for the same key | -**Output** — `{ content: string, file_path?: string, written: boolean }` +**Output** — `{ java_source: string, file_path?: string, written: boolean, sso_stub_source?: string, sso_stub_file_path?: string, sso_stub_written?: boolean }` + +When `sso_class` is provided the response includes `sso_stub_source` (the `ILoginPage` implementation), `sso_stub_file_path` (derived from `output_path`'s directory), and `sso_stub_written`. --- @@ -586,19 +632,35 @@ Validates a Java Page Object source file against 30+ quality rules (structural c Generates an XML test case skeleton with UUID v4 guids and sequential `testItemId` values. +**URI-aware XML structure:** use `target_uri` to pick the correct XML nesting: + +- Omit or use a `sf:` URI → flat Salesforce step structure (existing behaviour) +- `ui:pageobject:target?pageId=pageobjects.Page` → wraps all steps in a `UiWithScreen` element (testItemId=1); substeps clause at testItemId=2; inner steps start at testItemId=3 + **Input** -| Parameter | Type | Required | Description | -| ----------------- | --------------------------------------- | -------- | --------------------------------------------------- | -| `test_case_name` | string | yes | Human-readable test case name | -| `test_case_id` | string | no | Custom test case ID (auto-generated if omitted) | -| `steps` | array of `{ api_id, name, arguments? 
}` | no | Step definitions | -| `output_path` | string | no | File path to write (must be within `allowed-paths`) | -| `overwrite` | boolean | no | Overwrite existing file (default: `false`) | -| `dry_run` | boolean | no | Return XML without writing to disk | -| `idempotency_key` | string | no | Prevents duplicate generation for the same key | +| Parameter | Type | Required | Description | +| --------------------- | ---------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------- | +| `test_case_name` | string | yes | Human-readable test case name | +| `test_case_id` | string | no | Custom test case ID (auto-generated if omitted) | +| `steps` | array of `{ api_id, name, attributes? }` | no | Step definitions | +| `target_uri` | string | no | Page object URI. `ui:pageobject:target?pageId=pageobjects.X` triggers `UiWithScreen` nesting; `sf:` or absent → flat. | +| `output_path` | string | no | File path to write (must be within `allowed-paths`) | +| `overwrite` | boolean | no | Overwrite existing file (default: `false`) | +| `dry_run` | boolean | no | Return XML without writing to disk | +| `validate_after_edit` | boolean | no | Run structural validation after generation (default: `true`). Returns `TESTCASE_INVALID` if invalid. Set `false` to skip. | +| `idempotency_key` | string | no | Prevents duplicate generation for the same key | + +**Output** — `{ xml_content: string, file_path?: string, written: boolean, validation?: ValidationResult }` -**Output** — `{ content: string, file_path?: string, written: boolean }` +`validation` is present when `validate_after_edit=true` (default). If the generated XML fails validation the tool returns `TESTCASE_INVALID` with the `validation` field in `details`. 
+ +**Error codes** + +| Code | Meaning | +| ------------------ | --------------------------------------------------------------------- | +| `TESTCASE_INVALID` | Generated XML failed structural validation (see `details.validation`) | +| `FILE_EXISTS` | `output_path` already exists and `overwrite=false` | --- @@ -807,7 +869,9 @@ Generates a `provardx-properties.json` file from the standard template. Placehol ### `provar.properties.read` -Reads and parses a `provardx-properties.json` file. Use this to inspect the current configuration before making changes with `provar.properties.set`. +Reads and parses a `provardx-properties.json` file directly from disk. Use this to inspect the current configuration before making changes with `provar.properties.set`. + +If the file you read differs on critical fields (`provarHome`, `projectPath`, `resultsPath`) from the file currently registered via `provar.automation.config.load`, the response will include a `details.warning` listing the divergent keys. This catches the common case where the agent reads one file but test runs use another. **Input** @@ -815,9 +879,9 @@ Reads and parses a `provardx-properties.json` file. Use this to inspect the curr | ----------- | ------ | -------- | ------------------------------------------- | | `file_path` | string | yes | Path to the `provardx-properties.json` file | -**Output** — `{ file_path, content }` where `content` is the parsed JSON object. +**Output** — `{ requestId, file_path, content[, details.warning] }` where `content` is the parsed JSON object. `details.warning` is present when the file diverges from the active sf config. -**Error codes:** `FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED` +**Error codes:** `PROPERTIES_FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED`, `PATH_TRAVERSAL` --- @@ -853,7 +917,7 @@ Updates one or more fields in a `provardx-properties.json` file. 
Only the suppli **Output** — `{ file_path, updated_fields, content }` -**Error codes:** `FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED` +**Error codes:** `PROPERTIES_FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED` --- @@ -877,7 +941,7 @@ Validates a `provardx-properties.json` file against the ProvarDX schema. Checks | `warning_count` | Number of warnings (e.g. unfilled placeholders) | | `issues` | Array of `{ field, severity, message }` | -**Error codes:** `MISSING_INPUT`, `FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED` +**Error codes:** `MISSING_INPUT`, `PROPERTIES_FILE_NOT_FOUND`, `MALFORMED_JSON`, `PATH_NOT_ALLOWED` --- @@ -1034,14 +1098,16 @@ Displays information about the currently connected Quality Hub org. Invokes `sf Triggers a Quality Hub test run. Invokes `sf provar quality-hub test run`. Returns the test run ID which can be passed to `provar.qualityhub.testrun.report` to poll for results. +> **Wildcard warning:** if any value in `flags` contains `*` or `?`, the tool adds `details.warning` explaining that QH plan-level reporting will be skipped. Execution still proceeds — the warning is non-blocking. + **Input** -| Parameter | Type | Required | Description | -| ------------ | -------- | -------- | ----------------------------------------------------------------------------- | -| `target_org` | string | yes | SF CLI org alias or username | -| `flags` | string[] | no | Additional raw CLI flags (e.g. `["--configuration-file", "config/run.json"]`) | +| Parameter | Type | Required | Description | +| ------------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| `target_org` | string | yes | SF CLI org alias or username | +| `flags` | string[] | no | Additional raw CLI flags (e.g. `["--plan-name", "SmokeTests"]`). Avoid wildcards in `--plan-name` values — they skip QH plan-level reporting. 
| -**Output** — `{ requestId, exitCode, stdout, stderr }` +**Output** — `{ requestId, exitCode, stdout, stderr, details?: { warning: string } }` **Error codes:** `QH_TESTRUN_FAILED`, `SF_NOT_FOUND` @@ -1144,9 +1210,20 @@ Triggers a Provar Automation test run using the currently loaded properties file | --------- | -------- | -------- | ------------------------------------------------------------------------ | | `flags` | string[] | no | Raw CLI flags to forward (e.g. `["--project-path", "/path/to/project"]`) | -**Output** — `{ requestId, exitCode, stdout, stderr[, output_lines_suppressed] }` +**Output** — `{ requestId, exitCode, stdout, stderr[, output_lines_suppressed][, steps][, details.warning] }` + +The `stdout` field is filtered before returning: Java schema-validator lines (`com.networknt.schema.*`) and stale logger-lock `SEVERE` warnings are stripped. If any lines were suppressed, `output_lines_suppressed` contains the count. -The `stdout` field is filtered before returning: Java schema-validator lines (`com.networknt.schema.*`) and stale logger-lock `SEVERE` warnings are stripped. If any lines were suppressed, `output_lines_suppressed` contains the count and a note is appended to `stdout`. Use `provar.testrun.rca` to inspect the full raw JUnit output. +After each run, the tool scans the results directory for JUnit XML files and adds a `steps` array when results are found: + +```json +"steps": [ + { "testItemId": "1", "title": "TC-Login-001-LoginAndVerify.testcase", "status": "pass" }, + { "testItemId": "2", "title": "TC-Login-002-ForgotPassword.testcase", "status": "fail", "errorMessage": "Execution failed: Element not found" } +] +``` + +Each entry represents one test case. `status` is `"pass"`, `"fail"`, or `"skip"`. If the results directory cannot be located or contains no JUnit XML, `details.warning` explains why and `steps` is absent. 
**Error codes:** `AUTOMATION_TESTRUN_FAILED`, `SF_NOT_FOUND` @@ -1259,12 +1336,15 @@ Uses a 4-step resolution algorithm (explicit path → `~/.sf/config.json` → `p Analyse a completed test run and return a structured Root Cause Analysis report. Reads `JUnit.xml`, classifies each failure into a root cause category, extracts page object and operation names, and flags pre-existing failures across prior Increment runs. -| Input | Type | Required | Description | -| -------------- | ------- | -------- | --------------------------------------------------------- | -| `project_path` | string | yes | Absolute path to the Provar project root | -| `results_path` | string | no | Explicit results directory override | -| `run_index` | integer | no | Specific Increment run to analyse (default: latest) | -| `locate_only` | boolean | no | Skip parsing; return artifact paths only (default: false) | +| Input | Type | Required | Description | +| -------------- | ------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `project_path` | string | yes | Absolute path to the Provar project root | +| `results_path` | string | no | Explicit results directory override; must be within `--allowed-paths` if provided | +| `run_index` | integer | no | Specific Increment run to analyse (default: latest) | +| `locate_only` | boolean | no | Skip parsing; return artifact paths only (default: false) | +| `mode` | string | no | `"rca"` (default) — full classification report. `"failures"` — lightweight `[{ testItemId, title, errorMessage }]` array, no RCA fields. | + +**mode=rca output fields:** | Output field | Description | | ----------------------- | ------------------------------------------------------------------------------- | @@ -1276,7 +1356,17 @@ Analyse a completed test run and return a structured Root Cause Analysis report. 
| `infrastructure_issues` | Recommendations for infra-category failures (credential, driver, license, etc.) | +| `recommendations` | Deduplicated list of all recommended actions | -**`FailureReport` fields:** +**mode=failures output fields:** + +| Output field | Description | +| ----------------- | -------------------------------------------------------------------- | +| `results_dir` | Resolved results directory | +| `failures` | `Array<{ testItemId: string, title: string, errorMessage: string }>` | +| `details.warning` | Set when `JUnit.xml` is absent; `failures` will be empty | + +Use `mode="failures"` when you only need the list of failing test case names without loading the full HTML report. Use `mode="rca"` (default) for root-cause classification and fix recommendations. + +**`FailureReport` fields (mode=rca only):** | Field | Description | | --------------------- | -------------------------------------------------------- | @@ -1296,7 +1386,64 @@ Analyse a completed test run and return a structured Root Cause Analysis report. Salesforce DML error categories (`SALESFORCE_*`) represent test-data failures — they appear in `failures[].root_cause_category` but are **not** included in `infrastructure_issues`. -**Error codes:** `RESULTS_NOT_CONFIGURED` +**Error codes:** `RESULTS_NOT_CONFIGURED`, `PATH_NOT_ALLOWED`, `PATH_TRAVERSAL` + +--- + +### `provar.testcase.step.edit` + +Atomically add or remove a single step (`<apiCall>`) in a Provar XML test case file. Writes a `.bak` backup before mutating, runs structural validation after the edit, and automatically restores the backup if validation fails. + +Prerequisites: the test case file must exist and be valid XML with a `<testCase>`/`<steps>` structure. 
+ +| Input | Type | Required | Description | +| --------------------- | ------- | -------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `test_case_path` | string | yes | Absolute path to the `.testcase` file; must be within `--allowed-paths` | +| `mode` | string | yes | `"remove"` — delete a step; `"add"` — insert a new step | +| `test_item_id` | string | yes | For `remove`: `testItemId` of the step to delete. For `add`: `testItemId` of the anchor step. | +| `position` | string | no (add only) | `"before"` or `"after"` relative to the anchor step (default: `"after"`) | +| `step_xml` | string | yes (add only) | The `<apiCall>...</apiCall>` XML fragment for the new step. Must be well-formed and contain an `<apiCall>` element. | +| `validate_after_edit` | boolean | no | Run structural validation after the mutation; restores backup on failure (default: `true`) | + +| Output field | Description | +| -------------- | ---------------------------------------------------------- | +| `success` | `true` on successful mutation | +| `test_item_id` | The `test_item_id` that was targeted | +| `mode` | `"remove"` or `"add"` | +| `validation` | `TestCaseValidationResult` when `validate_after_edit=true` | + +**Error codes:** + +| Code | Meaning | +| ------------------------ | ------------------------------------------------------------------------------------------------- | +| `STEP_NOT_FOUND` | No step with the given `testItemId` found; `details.all_test_item_ids` lists every ID in the file | +| `INVALID_STEP_XML` | `step_xml` failed XML parsing or contains no `<apiCall>` element; file is not modified | +| `INVALID_XML_AFTER_EDIT` | Post-mutation validation failed; original file has been restored from backup | +| `FILE_NOT_FOUND` | `test_case_path` does not exist | +| `MISSING_INPUT` | `step_xml` is required for `mode=add` but was not provided | +| `PATH_NOT_ALLOWED` | `test_case_path` or its `.bak` path is outside 
`--allowed-paths` | + +**Example — remove step 3:** + +```json +{ + "test_case_path": "/projects/myapp/tests/Login.testcase", + "mode": "remove", + "test_item_id": "3" +} +``` + +**Example — insert a Sleep step after step 2:** + +```json +{ + "test_case_path": "/projects/myapp/tests/Login.testcase", + "mode": "add", + "test_item_id": "2", + "position": "after", + "step_xml": "<apiCall apiId=\"Sleep\" />" +} +``` --- diff --git a/package.json b/package.json index 829c272..8c47f57 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,8 @@ { "name": "@provartesting/provardx-cli", "description": "A plugin for the Salesforce CLI to orchestrate testing activities and report quality metrics to Provar Quality Hub", - "version": "1.5.0-beta.9", - "mcpName": "io.github.provartesting/provar", + "version": "1.5.0-beta.11", + "mcpName": "io.github.ProvarTesting/provar", "license": "BSD-3-Clause", "plugins": [ "@provartesting/provardx-plugins-automation", diff --git a/scripts/mcp-smoke.cjs b/scripts/mcp-smoke.cjs index 5bdeb5d..06b709f 100644 --- a/scripts/mcp-smoke.cjs +++ b/scripts/mcp-smoke.cjs @@ -356,6 +356,18 @@ async function runTests() { arguments: { story: 'Verify Users table has at least one Active record after Salesforce flow runs' }, }); + // ── 49. provar.connection.list ──────────────────────────────────────────── + // TMP has no .testproject → CONNECTION_FILE_NOT_FOUND result (not a protocol error) + await callTool('provar.connection.list', { project_path: TMP }); + + // ── 50. 
provar.testcase.step.edit ───────────────────────────────────────── + // TMP/nonexistent.testcase does not exist → FILE_NOT_FOUND result + await callTool('provar.testcase.step.edit', { + test_case_path: path.join(TMP, 'nonexistent.testcase'), + mode: 'remove', + test_item_id: '1', + }); + server.stdin.end(); } @@ -364,8 +376,8 @@ async function runTests() { // ---------------------------------------------------------------------------- server.on('close', () => { clearTimeout(overallTimer); - // initialize + tools/list + 37 tools + prompts/list + 8 prompts/get (setup excluded from default count) - const TOTAL_EXPECTED = 48 + (INCLUDE_SETUP ? 1 : 0); + // initialize + tools/list + 39 tools + prompts/list + 8 prompts/get (setup excluded from default count) + const TOTAL_EXPECTED = 50 + (INCLUDE_SETUP ? 1 : 0); let passed = 0; let failed = 0; diff --git a/server.json b/server.json index 9c1b475..781cc2a 100644 --- a/server.json +++ b/server.json @@ -1,6 +1,6 @@ { "$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json", - "name": "io.github.provartesting/provar", + "name": "io.github.ProvarTesting/provar", "title": "Provar MCP Server", "description": "Provar MCP: AI-powered Salesforce test automation. 
Generate, validate, migrate, and run tests.", "websiteUrl": "https://github.com/ProvarTesting/provardx-cli/blob/main/docs/mcp.md", @@ -14,12 +14,12 @@ "url": "https://github.com/ProvarTesting/provardx-cli", "source": "github" }, - "version": "1.5.0-beta.9", + "version": "1.5.0-beta.11", "packages": [ { "registryType": "npm", "identifier": "@provartesting/provardx-cli", - "version": "1.5.0-beta.9", + "version": "1.5.0-beta.11", "transport": { "type": "stdio" }, diff --git a/src/commands/provar/auth/login.ts b/src/commands/provar/auth/login.ts index fb0ae1e..6d4bc7b 100644 --- a/src/commands/provar/auth/login.ts +++ b/src/commands/provar/auth/login.ts @@ -20,7 +20,7 @@ import { Messages.importMessagesDirectoryFromMetaUrl(import.meta.url); const messages = Messages.loadMessages('@provartesting/provardx-cli', 'sf.provar.auth.login'); -// Production values bundled from Phase 2 handoff (2026-04-11). +// Production values bundled at auth handoff (2026-04-11). // Override via PROVAR_COGNITO_DOMAIN / PROVAR_COGNITO_CLIENT_ID for non-prod environments. const DEFAULT_COGNITO_DOMAIN = 'us-east-1xpfwzwmop.auth.us-east-1.amazoncognito.com'; const DEFAULT_CLIENT_ID = '29cs1a784r4cervmth8ugbkkri'; diff --git a/src/mcp/rules/provar_best_practices_rules.json b/src/mcp/rules/provar_best_practices_rules.json index 298b39c..77f98d2 100644 --- a/src/mcp/rules/provar_best_practices_rules.json +++ b/src/mcp/rules/provar_best_practices_rules.json @@ -34,9 +34,7 @@ "category": "XMLSchema", "name": "Test case root element must be testCase", "description": "The root XML element must be . 
Any other root element prevents the test from being imported into Provar.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "critical", "weight": 10, "recommendation": "Ensure the root element is with proper namespace and attributes.", @@ -52,9 +50,7 @@ "category": "XMLSchema", "name": "Test case must have valid identifier", "description": "Test case must have one of: 'id' (recommended), 'guid' (Provar V3), or 'registryId' (legacy) attribute. Missing identifier prevents test case import.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "critical", "weight": 10, "recommendation": "Add 'id' attribute to element (e.g., id=\"1\"), or use 'guid' for Provar V3 compatibility.", @@ -70,9 +66,7 @@ "category": "XMLSchema", "name": "Test case should have failureBehaviour attribute", "description": "The 'failureBehaviour' attribute is recommended for new tests (default: 'Continue'). While not required, it makes test behavior explicit.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Add failureBehaviour=\"Continue\" to element for explicit failure handling.", @@ -88,9 +82,7 @@ "category": "XMLSchema", "name": "Consider migrating from registryId to id or guid", "description": "Using legacy 'registryId' attribute. Consider migrating to 'id' (simple) or 'guid' (Provar V3) for better compatibility.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Replace registryId with 'id' or 'guid' attribute for Provar V3 compatibility.", @@ -106,9 +98,7 @@ "category": "XMLSchema", "name": "Value elements must not use text attribute", "description": "Content must be inside element, not as text=\"...\" attribute. 
Using text attribute causes XML parsing errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Change to content", @@ -124,9 +114,7 @@ "category": "XMLSchema", "name": "URI attributes must properly encode ampersands", "description": "Ampersands in URI attributes must be encoded as & for valid XML. Unencoded & causes XML parsing errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Replace & with & in all uri=\"...\" attributes (e.g., uri=\"sf:ui:binding:object?object=Lead&action=View\")", @@ -142,9 +130,7 @@ "category": "XMLSchema", "name": "Test case must have steps element", "description": "Test case must contain a element with at least one apiCall. Missing steps element prevents test case from loading.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "critical", "weight": 10, "recommendation": "Add element containing apiCall steps to the test case.", @@ -160,9 +146,7 @@ "category": "XMLSchema", "name": "Test case should not be empty", "description": "Test case element should contain at least one . Empty test cases have no executable logic.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Add apiCall steps to implement test logic.", @@ -178,9 +162,7 @@ "category": "XMLSchema", "name": "API identifier must be a valid Provar API", "description": "The apiId attribute must reference a real Provar API. Unknown or hallucinated apiIds (like 'ApexDescribeObject') cause complete test step failures because Provar cannot find the API implementation.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Use only valid Provar apiIds. Common corrections: ApexDescribeObject→ApexReadObject, ApexQueryObject→ApexSoqlQuery, ApexInsertObject→ApexCreateObject. 
Refer to provar_test_step_schema.json for the complete list of valid APIs.", @@ -195,10 +177,7 @@ "category": "NamingConventions", "name": "Folder names are modular and title-cased", "description": "Test folders must be concise, modular, alphabetic, and use Title Case with spaces between words.", - "appliesTo": [ - "Folder", - "TestCase" - ], + "appliesTo": ["Folder", "TestCase"], "severity": "major", "weight": 5, "recommendation": "Rename folder to be concise, alphabetic only, and in Title Case (e.g. 'Test Case Design').", @@ -214,9 +193,7 @@ "category": "NamingConventions", "name": "Test case names use consistent naming convention", "description": "Test case names must follow a consistent naming pattern. NOTE: This rule only applies to test cases with an explicit 'name' attribute (typically callable test cases). The 'id' attribute can be any format.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Use a consistent naming style for callable test case names. Examples: 'CreateAndConvertLead', 'createAndConvertLead', 'Create And Convert Lead'.", @@ -233,9 +210,7 @@ "category": "NamingConventions", "name": "Parameters and variables use camelCase", "description": "Test parameters, variables, and return values must be camelCase and unique within their scope.", - "appliesTo": [ - "Parameter" - ], + "appliesTo": ["Parameter"], "severity": "major", "weight": 5, "recommendation": "Rename parameters and variables to camelCase and ensure no duplicates in the same scope.", @@ -251,10 +226,7 @@ "category": "NamingConventions", "name": "Page Objects use PascalCase", "description": "Page Object names must follow PascalCase and represent a single logical page or screen.", - "appliesTo": [ - "PageObject", - "TestCase" - ], + "appliesTo": ["PageObject", "TestCase"], "severity": "major", "weight": 5, "recommendation": "Rename Page Object to PascalCase (e.g. 
'AccountDetailsPage').", @@ -270,10 +242,7 @@ "category": "NamingConventions", "name": "Field names use camelCase", "description": "Field mapping names must be camelCase for consistency and readability.", - "appliesTo": [ - "Field", - "TestCase" - ], + "appliesTo": ["Field", "TestCase"], "severity": "minor", "weight": 2, "recommendation": "Rename field mapping to camelCase (e.g. 'billingStreet').", @@ -289,9 +258,7 @@ "category": "StructureAndGrouping", "name": "All steps are inside Group steps or BDD structure", "description": "Every test step must be contained within a Group Step, BDD design step (Given/When/Then/And/But), or Finally block to improve readability and maintenance.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Wrap root-level steps into logically named Group Steps, BDD steps (Given/When/Then), or Finally blocks.", @@ -308,9 +275,7 @@ "category": "StructureAndGrouping", "name": "Test case has top-level summary", "description": "Each test case must have a summary describing its purpose and usage.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Add a concise summary describing scenario, preconditions, and outcome.", @@ -325,19 +290,14 @@ "category": "StructureAndGrouping", "name": "Custom step names for UI asserts and sets", "description": "UI Assert, Set Values, and long SOQL query steps must be renamed to be descriptive.", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "minor", "weight": 2, "recommendation": "Rename the step to describe what is being asserted or set (e.g. 
'assertAccountStatusIsActive').", "check": { "type": "semanticPattern", "target": "step", - "appliesWhen": [ - "step.type in ['UIAssert','SetValues','SOQL']" - ], + "appliesWhen": ["step.type in ['UIAssert','SetValues','SOQL']"], "mustHaveCustomName": true }, "source": "README: Naming Conventions - Test Step Names" @@ -347,9 +307,7 @@ "category": "DataDrivenTesting", "name": "Excel headers match field label or API name", "description": "Excel/data source column headers must match either the field label or API name.", - "appliesTo": [ - "DataSource" - ], + "appliesTo": ["DataSource"], "severity": "major", "weight": 5, "recommendation": "Rename Excel column headers to match Salesforce field labels or API names.", @@ -365,10 +323,7 @@ "category": "DataDrivenTesting", "name": "No Excel functions in data", "description": "Excel test data must be static; functions are not allowed in cells.", - "appliesTo": [ - "DataSource", - "TestCase" - ], + "appliesTo": ["DataSource", "TestCase"], "severity": "major", "weight": 5, "recommendation": "Replace Excel formulas with static values to keep tests deterministic.", @@ -384,9 +339,7 @@ "category": "DataDrivenTesting", "name": "Variable names must use valid identifiers", "description": "Variable names, field references, and result names must contain only letters, digits, and underscores. Spaces, hyphens, and special characters cause RUNTIME FAILURES in Provar. Examples: {Account.Name} ✅ | {Account.Account Name} ❌ (space will fail)", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Replace spaces and special characters with underscores. 
Use API field names (e.g., Account.Name) instead of field labels (e.g., Account.Account Name).", @@ -403,10 +356,7 @@ "category": "DataDrivenTesting", "name": "No hardcoded values in steps", "description": "Fields and values used more than once in a test must be parameterized or stored in variables.", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "minor", "weight": 3, "recommendation": "Replace repeated literal values with variables or parameters defined in the test case.", @@ -423,9 +373,7 @@ "category": "ReusabilityAndCallables", "name": "Callable tests reside in Callables folder", "description": "Callable tests must be stored under a designated Callables folder for discoverability.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Move callable test into 'tests/Callables' (or equivalent) folder.", @@ -440,9 +388,7 @@ "category": "ReusabilityAndCallables", "name": "Callable tests are parameterized", "description": "Callable tests must define input parameters instead of using internal hardcoded values.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Add input parameters to the callable test and reference them in steps instead of literals.", @@ -458,9 +404,7 @@ "category": "ReusabilityAndCallables", "name": "Callable tests executable in isolation", "description": "Callable tests must be self-contained and executable independently for debugging.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Ensure callable tests create their own preconditions (records, connections) or accept them as parameters.", @@ -476,9 +420,7 @@ "category": "ConnectionsAndEnvironments", "name": "Admin connection supports Login-As", "description": "There must be an Admin connection configured to perform Login-As for other profiles.", - 
"appliesTo": [ - "Project" - ], + "appliesTo": ["Project"], "severity": "major", "weight": 5, "recommendation": "Create an Admin connection with 'login as' permissions for target profiles.", @@ -494,9 +436,7 @@ "category": "ConnectionsAndEnvironments", "name": "Connection names should not contain environment specifiers", "description": "Connection names should be environment-agnostic. Avoid embedding UAT, QA, Sandbox, Prod, Production, or Scratch in connection names. Use environment overrides instead.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Remove environment specifiers from connection names. Create environment-specific connections via Provar environment overrides.", @@ -511,9 +451,7 @@ "category": "TestCaseDesign", "name": "Prefer API for setup where possible", "description": "Test setup steps that only prepare data should use API/SOQL instead of UI flows when feasible.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 3, "recommendation": "Replace UI-based record creation for setup with API or SOQL-based data creation where appropriate.", @@ -530,9 +468,7 @@ "category": "TestCaseDesign", "name": "Use Group Steps or BDD structure for logical phases", "description": "Long tests should be organized using Group Steps, BDD design steps (Given/When/Then/And/But), or Finally blocks that represent logical phases of the flow.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Organize test with Group Steps (e.g. 'Setup', 'Execute Scenario', 'Validate Results'), BDD steps (Given/When/Then), or Finally blocks around related actions.", @@ -550,9 +486,7 @@ "category": "TestCaseDesign", "name": "Disabled test steps should be removed", "description": "Test steps marked with disabled should be removed from the test case to maintain clean, maintainable code. 
Disabled steps create technical debt and confusion.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Remove disabled test steps entirely. If the step is needed for future use, consider creating a separate test case or adding it when actually required.", @@ -567,9 +501,7 @@ "category": "MaintenanceAndFolders", "name": "Folder-level setup test per application segment", "description": "Each folder representing an application segment must include a setup test case for connections.", - "appliesTo": [ - "Folder" - ], + "appliesTo": ["Folder"], "severity": "minor", "weight": 3, "recommendation": "Add a setup test case that establishes folder-wide connections (even if folder scope bug exists).", @@ -585,9 +517,7 @@ "category": "MaintenanceAndFolders", "name": "Consistent Provar/OS/browser versions", "description": "Execution environments and authors should align on Provar, OS, and browser versions.", - "appliesTo": [ - "Project" - ], + "appliesTo": ["Project"], "severity": "info", "weight": 1, "recommendation": "Document and standardize supported Provar, OS, and browser versions in project configuration.", @@ -602,9 +532,7 @@ "category": "BuildAndCI", "name": "Regression Test Plan exists for CI", "description": "A Regression Test Plan must exist and be referenced by CI builds.", - "appliesTo": [ - "Project" - ], + "appliesTo": ["Project"], "severity": "major", "weight": 5, "recommendation": "Ensure there is a 'Regression' plan and that the CI workflow references it.", @@ -620,9 +548,7 @@ "category": "TestCaseDesign", "name": "Test case has valid identifier", "description": "Test case must have a valid identifier: 'guid' (V3), 'id', or 'registryId' (legacy) attribute.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "critical", "weight": 8, "recommendation": "Add a valid identifier to the element: guid (preferred), id, or registryId attribute.", @@ -637,9 +563,7 @@ "category": 
"TestCaseDesign", "name": "Test case has steps element", "description": "Test case must contain a element with at least one step.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "critical", "weight": 8, "recommendation": "Add a element containing apiCall steps.", @@ -654,9 +578,7 @@ "category": "ConnectionsAndEnvironments", "name": "UiConnect has invalid arguments (ApexConnect arguments used)", "description": "UiConnect steps have different arguments than ApexConnect. UiConnect does NOT support: autoCleanup, enableObjectIdLogging, quickUiLogin, closeAllPrimaryTabs, alreadyOpenBehaviour, lightningMode, uiApplicationName, cleanupConnectionName. Valid UiConnect arguments are: connectionName, connectionId, resultName, resultScope, reuseConnectionName, privateBrowsingMode, webBrowser.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Remove invalid arguments from UiConnect. For Salesforce connections requiring autoCleanup, use ApexConnect instead of UiConnect. UiConnect is for non-Salesforce UI connections only.", @@ -671,9 +593,7 @@ "category": "ConnectionsAndEnvironments", "name": "Prefer autoCleanup over manual ApexDeleteObject steps", "description": "ApexConnect should use autoCleanup=true instead of manual ApexDeleteObject steps. autoCleanup is preferred because it requires fewer steps, is more reliable (cleanup happens even if test fails), and automatically handles deletion order. Only use manual deletes when records are created indirectly and IDs aren't captured. Note: With multiple ApexConnect steps, autoCleanup only deletes records created using that specific connection's resultName.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Set autoCleanup=true in ApexConnect and remove manual ApexDeleteObject steps. 
autoCleanup automatically deletes records created via ApexCreateObject and UiWithScreen action=New (when sfUiTargetResultName captures the ID). With multiple connections, each autoCleanup only handles its own connection's records.", @@ -688,9 +608,7 @@ "category": "TestCaseDesign", "name": "First UiWithScreen must use navigate=Always or IfNeccessary", "description": "The first UiWithScreen step must use navigate='Always' or 'IfNeccessary' to ensure proper navigation. Using 'Dont' will cause failures in CLI/debug mode. This rule is skipped for callable tests (visibility='Internal').", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Set navigate='Always' (preferred) or 'IfNeccessary' for the first UiWithScreen step. Never use 'Dont' on the first UiWithScreen unless in a callable test.", @@ -705,9 +623,7 @@ "category": "TestCaseDesign", "name": "First UiWithScreen should prefer navigate=Always over IfNeccessary", "description": "The first UiWithScreen step should use navigate='Always' rather than 'IfNeccessary' for more reliable navigation. While 'IfNeccessary' works, 'Always' provides more consistent behavior. This rule is skipped for callable tests (visibility='Internal').", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Change navigate='IfNeccessary' to navigate='Always' for the first UiWithScreen step for more reliable navigation.", @@ -722,9 +638,7 @@ "category": "TestCaseDesign", "name": "UiWithScreen with navigate=Always for Edit/View must have sfUiTargetObjectId", "description": "When UiWithScreen navigates to an Edit or View screen with navigate='Always', the sfUiTargetObjectId argument must be populated to specify which record to edit or view. Without this, Provar cannot determine which record to navigate to. 
This does not apply to New screens (which create new records) or List/Home screens.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add sfUiTargetObjectId argument referencing a previously captured record ID variable (e.g., AccountId from a prior UiWithScreen with action=New or ApexCreateObject).", @@ -739,9 +653,7 @@ "category": "NamingConventions", "name": "ApexConnect resultName is unique", "description": "Each ApexConnect step must have a unique resultName for proper connection tracking.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Use unique resultName values like 'ApexConnection', 'ApexConnection2', etc.", @@ -757,9 +669,7 @@ "category": "TestCaseDesign", "name": "testItemId values are whole numbers", "description": "All testItemId attributes must be whole numbers, not decimals.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Use whole numbers for testItemId (1, 2, 3...) not decimals (1.0, 2.5).", @@ -774,9 +684,7 @@ "category": "DataDrivenTesting", "name": "Variable references use correct syntax", "description": "Variable references must use {VarName[1].Field} syntax, not {VarName.1.Field}.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Use {ResultName[index].FieldName} where index starts at 1, not 0.", @@ -792,9 +700,7 @@ "category": "NamingConventions", "name": "Custom fields end with __c", "description": "Custom field references must end with '__c' suffix.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add '__c' suffix to custom field names (e.g. 
'CustomField__c').", @@ -809,9 +715,7 @@ "category": "TestCaseDesign", "name": "SOQL queries include Id and Name", "description": "SOQL queries should retrieve Id and Name fields by default.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Add 'Id, Name' to SELECT clause for object queries.", @@ -827,9 +731,7 @@ "category": "TestCaseDesign", "name": "ForEach loops have valid source collection", "description": "ForEach steps must specify a valid source collection to iterate over.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 4, "recommendation": "Add 'list' argument to ForEach step referencing a valid collection variable.", @@ -845,9 +747,7 @@ "category": "TestCaseDesign", "name": "If statements have conditions", "description": "If steps must have a boolean condition expression.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'condition' argument to If step with valid boolean expression.", @@ -863,9 +763,7 @@ "category": "TestCaseDesign", "name": "While loops have exit conditions", "description": "While loops must have a condition to prevent infinite loops.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'condition' argument to While step that will eventually evaluate to false.", @@ -881,9 +779,7 @@ "category": "TestCaseDesign", "name": "ForEach loops have valueName to store current item", "description": "ForEach steps must specify a valueName argument to store the current iteration value.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'valueName' argument to ForEach step to name the variable that will hold each item during iteration.", @@ -899,9 +795,7 @@ "category": "TestCaseDesign", "name": "Switch statements have value expression", "description": 
"Switch steps must specify a value argument to evaluate against cases.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'value' argument to Switch step with the expression to evaluate.", @@ -917,9 +811,7 @@ "category": "TestCaseDesign", "name": "Sleep steps have duration specified", "description": "Sleep steps must specify sleepSecs argument with the number of seconds to wait.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'sleepSecs' argument to Sleep step with a positive decimal value.", @@ -935,9 +827,7 @@ "category": "TestCaseDesign", "name": "WaitFor steps have condition", "description": "WaitFor steps must specify a condition argument to evaluate each iteration.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'condition' argument to WaitFor step with a boolean expression to wait for.", @@ -953,9 +843,7 @@ "category": "TestCaseDesign", "name": "WaitFor steps have max iterations limit", "description": "WaitFor steps must specify maxIterations to prevent infinite waiting.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add 'maxIterations' argument to WaitFor step to limit wait cycles.", @@ -971,9 +859,7 @@ "category": "TestCaseDesign", "name": "AssertValues has comparisonType", "description": "AssertValues steps must specify a comparisonType argument (EqualTo, Contains, etc.).", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'comparisonType' argument with valid value: EqualTo, NotEqualTo, Contains, NotContains, StartsWith, EndsWith, GreaterThan, LessThan, Matches, IsNull, NotNull.", @@ -989,9 +875,7 @@ "category": "TestCaseDesign", "name": "AssertValues has expectedValue", "description": "AssertValues steps must specify an 
expectedValue argument containing the value to test.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'expectedValue' argument with the variable or value being verified.", @@ -1007,9 +891,7 @@ "category": "TestCaseDesign", "name": "AssertValues has actualValue", "description": "AssertValues steps must specify an actualValue argument containing the expected result. NOTE: actualValue CAN be empty when using NotEqualTo to check if a string is NOT blank (common pattern for null/blank checks).", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'actualValue' argument with the expected result to compare against. For blank string checks, actualValue can be empty with NotEqualTo comparison.", @@ -1025,9 +907,7 @@ "category": "TestCaseDesign", "name": "Given steps have description", "description": "BDD Given steps must specify a description argument explaining the precondition.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add 'description' argument to Given step with a clear precondition statement.", @@ -1043,9 +923,7 @@ "category": "TestCaseDesign", "name": "When steps have description", "description": "BDD When steps must specify a description argument explaining the action.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add 'description' argument to When step with a clear action statement.", @@ -1061,9 +939,7 @@ "category": "TestCaseDesign", "name": "Then steps have description", "description": "BDD Then steps must specify a description argument explaining the expected outcome.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add 'description' argument to Then step with a clear expected outcome statement.", @@ -1079,9 +955,7 @@ "category": 
"TestCaseDesign", "name": "ApexSoqlQuery has soqlQuery argument", "description": "ApexSoqlQuery steps must specify a soqlQuery argument with the SOQL statement.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'soqlQuery' argument with valid SOQL statement (e.g., SELECT Id, Name FROM Account).", @@ -1097,9 +971,7 @@ "category": "ConnectionsAndEnvironments", "name": "DbConnect has connectionName", "description": "DbConnect steps must specify a connectionName argument identifying the database connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'connectionName' argument with the database connection identifier from environment settings.", @@ -1115,9 +987,7 @@ "category": "ConnectionsAndEnvironments", "name": "DbConnect has resultName", "description": "DbConnect steps must specify a resultName argument to store the connection reference.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'resultName' argument with the variable name to store the connection reference.", @@ -1133,9 +1003,7 @@ "category": "TestCaseDesign", "name": "SqlQuery has query argument", "description": "SqlQuery steps must specify a query argument with the SQL statement.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'query' argument with valid SQL statement.", @@ -1151,9 +1019,7 @@ "category": "TestCaseDesign", "name": "SqlQuery has dbConnectionName", "description": "SqlQuery steps must specify a dbConnectionName argument referencing the database connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'dbConnectionName' argument with the resultName from a DbConnect step.", @@ -1169,9 +1035,7 @@ "category": "ConnectionsAndEnvironments", "name": 
"WebConnect has connectionName", "description": "WebConnect steps must specify a connectionName argument identifying the web service connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'connectionName' argument with the web service connection identifier.", @@ -1187,9 +1051,7 @@ "category": "ConnectionsAndEnvironments", "name": "WebConnect has resultName", "description": "WebConnect steps must specify a resultName argument to store the connection reference.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'resultName' argument with the variable name to store the connection reference.", @@ -1205,9 +1067,7 @@ "category": "TestCaseDesign", "name": "RestRequest has connectionName", "description": "RestRequest steps must specify a connectionName argument referencing the web service connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'connectionName' argument with the resultName from a WebConnect step.", @@ -1223,10 +1083,7 @@ "category": "TestCaseDesign", "name": "Variables are defined before use", "description": "Variables referenced in test should be defined via SetValues or result from previous steps.", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "major", "weight": 5, "recommendation": "Define variable with SetValues step or ensure it's populated by previous step (SOQL, CreateObject, etc.).", @@ -1241,9 +1098,7 @@ "category": "ConnectionsAndEnvironments", "name": "Apex API calls reference valid connections", "description": "Apex API operations must reference an existing ApexConnect connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure ApexConnect step exists before Apex API calls and connection names match.", @@ -1258,9 
+1113,7 @@ "category": "ConnectionsAndEnvironments", "name": "Database operations reference valid connections", "description": "Database API operations must reference an existing DbConnect connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure DbConnect step exists before database operations and connection names match.", @@ -1275,9 +1128,7 @@ "category": "ConnectionsAndEnvironments", "name": "UI operations reference valid connections", "description": "UI API operations must reference an existing UiConnect connection.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure UiConnect step exists before UI operations and connection names match.", @@ -1292,9 +1143,7 @@ "category": "ConnectionsAndEnvironments", "name": "uiConnectionName must be a literal string", "description": "UiWithScreen/UiDoAction/UiAssert steps must set uiConnectionName to a literal string (the resultName from UiConnect/ApexConnect). Using for uiConnectionName causes Provar to error.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Set uiConnectionName using AdminConnection (not a variable reference).", @@ -1309,9 +1158,7 @@ "category": "TestCaseDesign", "name": "SOQL queries have SELECT and FROM clauses", "description": "Valid SOQL queries must contain both SELECT and FROM clauses.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure SOQL query has format: SELECT FROM ", @@ -1327,9 +1174,7 @@ "category": "TestCaseDesign", "name": "SOQL queries include WHERE or LIMIT clause", "description": "SOQL queries must have WHERE clause or LIMIT to prevent unreasonably large result sets and potential heap size errors. 
This prevents Apex governance violations and performance issues.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add WHERE clause to filter results (e.g. WHERE Id = :recordId) or LIMIT clause to cap result size (e.g. LIMIT 100). For large data volumes, always include both WHERE and LIMIT.", @@ -1345,9 +1190,7 @@ "category": "TestCaseDesign", "name": "SOQL queries must not be inside loops", "description": "SOQL queries inside For/ForEach/While/DoWhile loops violate Apex governor limits. Each SOQL query execution consumes from the 100 SOQL query limit per transaction. Queries in loops can quickly exceed this limit causing 'Too many SOQL queries: 101' errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Move SOQL query outside the loop and store results in a collection. Then iterate over the collection. Use SOQL WHERE clauses to filter in the query rather than filtering in a loop. Consider using aggregate queries or bulk processing patterns.", @@ -1363,9 +1206,7 @@ "category": "ApexAPI", "name": "Apex API steps must reference a valid connection", "description": "All Apex API calls (ApexCreate, ApexUpdate, ApexDelete, ApexRead, ApexSoqlQuery) must reference a valid Salesforce connection defined earlier in the test using ApexConnect. Tests will fail if connection is missing, empty, or references an undefined connection name.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add an ApexConnect step before any Apex API calls to establish a Salesforce connection. Reference the connection name in all subsequent Apex API calls. 
Use variables for connection names if dynamically determined.", @@ -1380,9 +1221,7 @@ "category": "ApexAPI", "name": "Apex CRUD operations must have valid object types", "description": "ApexCreate, ApexUpdate, ApexDelete, and ApexRead steps must specify a valid Salesforce object type (e.g., Account, Contact, Broker__c). Object type must start with capital letter and contain only alphanumeric characters and underscores. Invalid or missing object types cause test failures.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Specify the correct Salesforce object API name (e.g., Account, Contact, Custom__c). Use standard object names or custom object API names ending with __c. Verify object exists in the target Salesforce org.", @@ -1397,9 +1236,7 @@ "category": "ApexAPI", "name": "ApexUpdateObject must have valid record ID", "description": "ApexUpdateObject requires a valid Salesforce record ID (15 or 18 characters) or a variable reference containing the ID. Missing, empty, or invalid record IDs cause test failures. Record IDs must be alphanumeric Salesforce IDs.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Provide a valid 15 or 18 character Salesforce record ID, or use a variable like {AccountId} that contains a valid ID from a prior Create or Read operation. Verify the record exists before attempting update.", @@ -1414,9 +1251,7 @@ "category": "ApexAPI", "name": "ApexUpdateObject must specify fields to update", "description": "ApexUpdateObject steps should specify at least one field to update in the field definitions. Update operations without field specifications have no effect and represent incomplete test logic. 
System arguments (objectType, recordId, objectId, apexConnectionName, resultScope, assignmentRuleId, resultIdName) are excluded from this check.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add field definitions to the ApexUpdateObject step and provide values for at least one field to update. For example, specify Name, Status, or other relevant fields with test data or variable references.", @@ -1431,9 +1266,7 @@ "category": "ApexAPI", "name": "ApexDeleteObject must have valid record ID", "description": "ApexDeleteObject requires a valid Salesforce record ID (15 or 18 characters) or a variable reference containing the ID. Missing, empty, or invalid record IDs cause test failures. Record IDs must be alphanumeric Salesforce IDs.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Provide a valid 15 or 18 character Salesforce record ID, or use a variable like {AccountId} that contains a valid ID from a prior Create or Read operation. Verify the record exists before attempting delete.", @@ -1448,9 +1281,7 @@ "category": "ApexAPI", "name": "ApexReadObject must have valid record ID", "description": "ApexReadObject requires a valid Salesforce record ID (15 or 18 characters) or a variable reference containing the ID. Missing, empty, or invalid record IDs cause test failures. Record IDs must be alphanumeric Salesforce IDs.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Provide a valid 15 or 18 character Salesforce record ID, or use a variable like {AccountId} that contains a valid ID from a prior Create operation. 
Verify the record exists before attempting read.", @@ -1465,9 +1296,7 @@ "category": "ApexAPI", "name": "ApexCreateObject with fields must populate at least one field", "description": "When ApexCreateObject includes a fields structure, at least one field must be populated with a value. Empty field structures indicate incomplete test implementation and may cause test failures or create records with default values only.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Populate at least one field with a value or variable reference in the ApexCreateObject fields structure. Remove the fields structure if not needed. Verify required fields for the object are populated.", @@ -1482,9 +1311,7 @@ "category": "ApexAPI", "name": "ApexCreateObject and ApexUpdateObject must include parameter metadata", "description": "ApexCreateObject and ApexUpdateObject steps MUST include and sections after . These metadata sections enable Provar IDE features like field auto-complete, field suggestions, and proper object binding. Missing metadata causes degraded IDE experience and prevents field discovery.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add with ConnectionName and CustomObjectName properties, and with apiParam elements for each field used in arguments. Study existing tier4 examples to see the exact structure required.", @@ -1499,9 +1326,7 @@ "category": "ApexAPI", "name": "ApexUpdateObject/ApexCreateObject field arguments must be direct, not nested in uiObjectFieldValue", "description": "ApexUpdateObject and ApexCreateObject steps MUST use direct field arguments (e.g., ) instead of nesting fields within uiObjectFieldValue elements inside the values argument. The values argument should contain only an empty valueList. 
Nested uiObjectFieldValue structures cause runtime failures and prevent proper field serialization.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use direct field arguments: FieldValue with a separate empty . Do NOT nest fields inside uiObjectFieldValue within the values argument.", @@ -1516,9 +1341,7 @@ "category": "ApexAPI", "name": "ApexReadObject must use generatedParameters for field references, not fields argument with textType", "description": "ApexReadObject steps should NOT use a 'fields' argument containing textType elements for field names. Instead, fields must be properly referenced via generatedParameters with modelBinding attributes. The incorrect pattern uses FieldName which prevents proper field metadata and IDE integration. The correct pattern includes proper with elements.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Remove the 'fields' argument with textType elements. Ensure each field has a corresponding entry in with proper modelBinding. Fields should be declared through generatedParameters, not through a fields argument.", @@ -1533,9 +1356,7 @@ "category": "XMLSchema", "name": "Apex CRUD apiParam elements must be self-closing without summary/type children", "description": "In ApexCreateObject, ApexUpdateObject, and ApexReadObject steps, elements within MUST be self-closing tags without child elements. LLMs commonly hallucinate and elements inside apiParam, which causes Provar to reject the test case as invalid XML. The correct pattern is: . The hallucinated pattern includes description and ... 
children which are ONLY valid in other contexts (RestRequest, UiDoAction) but NEVER in Apex CRUD operations.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Remove all and child elements from apiParam elements in ApexCreateObject, ApexUpdateObject, and ApexReadObject steps. Use self-closing tags. For example: ", @@ -1551,9 +1372,7 @@ "category": "ApexAPI", "name": "ApexReadObject should use resultAssertions instead of separate AssertValues", "description": "When asserting field values after ApexReadObject, use within the ApexReadObject step rather than separate AssertValues API calls. This pattern is more efficient, provides better error messages, and maintains field metadata associations. The resultAssertions element should contain children with comparisonType, resultName, title, and optional elements.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 3, "recommendation": "Add block within ApexReadObject after the section. Move AssertValues logic into elements with appropriate comparisonType (EqualTo, IsPresent, NotEqualTo, etc.) and expected values where applicable.", @@ -1568,9 +1387,7 @@ "category": "ApexAPI", "name": "ApexExtractLayout must have object, file type, and path", "description": "ApexExtractLayout requires three parameters: object (Salesforce object API name), fileType (.xlsx only supported), and filePath (where to save the layout). Missing any parameter causes test failure. Only .xlsx file format is supported for layout extraction.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Specify the Salesforce object API name, set fileType to .xlsx, and provide a valid file path for the extracted layout. Ensure the directory path exists and is writable. 
Use relative paths for portability.", @@ -1585,9 +1402,7 @@ "category": "ApexAPI", "name": "ApexAssertLayout must have object and expected file", "description": "ApexAssertLayout requires two parameters: object (Salesforce object API name) and expectedFile (path to expected .xlsx layout file). Missing either parameter causes test failure. Only .xlsx file format is supported for layout assertions.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Specify the Salesforce object API name and provide path to expected .xlsx layout file. Ensure the expected file exists and is in .xlsx format. Use ApexExtractLayout to generate baseline layouts.", @@ -1602,9 +1417,7 @@ "category": "ApexAPI", "name": "Connection arguments must use correct naming convention", "description": "Different step types require specific connection argument names: 'connectionName' for Connect steps (ApexConnect, UiConnect, DbConnect, WebConnect) and web requests (RestRequest, SoapRequest); 'apexConnectionName' for Apex CRUD/SOQL operations (ApexCreateObject, ApexReadObject, ApexUpdateObject, ApexDeleteObject, ApexSoqlQuery, ApexBulk, etc.); 'uiConnectionName' for UI steps (UiWithScreen, UiDoAction, UiAssert); 'dbConnectionName' for database operations (DbDelete, DbInsert, DbRead, DbUpdate, SqlQuery). Using incorrect connection argument names causes connection lookup failures.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Use the correct connection argument name for each step type. Connect steps and web requests use 'connectionName'. Apex CRUD/SOQL operations use 'apexConnectionName'. UI steps use 'uiConnectionName'. Database operations use 'dbConnectionName'. 
Review Provar API documentation for the specific step type being used.", @@ -1619,9 +1432,7 @@ "category": "ApexAPI", "name": "Apex CRUD operations should include parameterGeneratorUri", "description": "ApexCreateObject, ApexReadObject, and ApexUpdateObject steps should include the parameterGeneratorUri attribute for proper IDE support. This attribute enables field auto-complete, field suggestions, and proper object binding in Provar IDE. Missing this attribute degrades IDE experience and may cause issues in some Provar versions.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Add parameterGeneratorUri attribute to Apex CRUD steps: ApexCreateObject uses 'command:com.provar.plugins.forcedotcom.ui.commands.CreateCustomObjectTestStepCommand', ApexReadObject uses 'command:com.provar.plugins.forcedotcom.ui.commands.ReadCustomObjectTestStepCommand', ApexUpdateObject uses 'command:com.provar.plugins.forcedotcom.ui.commands.UpdateCustomObjectTestStepCommand'.", @@ -1636,9 +1447,7 @@ "category": "TestCaseDesign", "name": "AssertValues arguments must be in correct order", "description": "AssertValues steps should contain the variable being tested (from Read/Query operations) and actualValue containing the expected literal value. Reversed arguments cause assertion logic to be backwards - comparing literals against variables instead of variables against expected values. This can result in unclear test step outcomes, although results are the same.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Correct the argument order: expectedValue should contain the variable reference (e.g., {OpportunityRead.Name}), actualValue should contain the expected literal value (e.g., 'Demo Opportunity'). 
The assertion logic is: 'Assert that expectedValue equals actualValue', which reads as 'Assert that {variable} equals literal'.", @@ -1653,9 +1462,7 @@ "category": "TestCaseDesign", "name": "Must use AssertValues API, not deprecated Assert API", "description": "Test cases must use the supported com.provar.plugins.bundled.apis.AssertValues API with expectedValue/actualValue/comparisonType arguments. The com.provar.plugins.bundled.core.testapis.Assert API is not supported and will not load properly in Provar. AssertValues provides more flexible comparison operators and clearer assertion semantics.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Replace com.provar.plugins.bundled.core.testapis.Assert steps with com.provar.plugins.bundled.apis.AssertValues. Use expectedValue (the variable being tested), comparisonType (EqualTo, NotEqualTo, etc.), actualValue (the expected result), and failureMessage (explanation of what failed).", @@ -1670,9 +1477,7 @@ "category": "TestCaseDesign", "name": "AssertValues should have meaningful expected values", "description": "AssertValues steps using comparison operators (EqualTo, Contains, GreaterThan, etc.) should specify meaningful expectedValue arguments. Empty or missing expected values defeat the purpose of assertions and provide no validation. Without expected values, assertions cannot verify correct behavior. Comparison types requiring values: EqualTo, NotEqualTo, Contains, NotContains, StartsWith, NotStartsWith, EndsWith, NotEndsWith, Matches, NotMatches, GreaterThan, LessThan, GreaterThanOrEqualTo, LessThanOrEqualTo.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Specify a clear expectedValue for the assertion based on requirements. Use test data variables or literals that represent the expected state. Empty expected values provide no validation. 
For example: expectedValue='{Account.Name}', comparisonType='EqualTo', actualValue='ACME Corporation'.", @@ -1681,16 +1486,14 @@ "target": "step", "apiId": "com.provar.plugins.bundled.apis.AssertValues" }, - "source": "Provar Test Step Schema: Meaningful Assertions (Phase 2 API Usage Analysis)" + "source": "Provar Test Step Schema: Meaningful Assertions (API Usage Analysis)" }, { "id": "CONTROL-SLEEP-001", "category": "TestCaseDesign", "name": "Sleep step duration and frequency issues", "description": "Sleep steps with duration > 10 seconds cause unnecessary test execution time (major). Sleep inside loops multiplies wait time and indicates polling anti-pattern (minor). More than 5 sleeps per 50 steps indicates excessive waiting rather than proper synchronization (minor).", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Replace long sleeps with explicit waits for conditions. Move sleeps outside loops and use proper wait-until-ready patterns. Replace multiple sleeps with single targeted waits. Use UI wait conditions, API polling with timeout, or event-driven synchronization.", @@ -1705,9 +1508,7 @@ "category": "TestCaseDesign", "name": "While loop must have termination condition", "description": "While loops without counterEnd and without a terminating condition (<=, <, >=, >, ==, !=) can run infinitely if no termination is defined (major). While loops with condition='true' or '{true}' are infinite loops that rely solely on break statements, making them difficult to debug (minor). A loop is valid if it has either counterEnd OR a comparison condition.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Either set counterEnd to a reasonable limit (e.g., 10, 50, 100) OR use a terminating condition with a comparison operator (e.g., {Counter <= MaxIterations}). Avoid while{true} - use a meaningful condition instead. 
Add timeout handling and clear exit conditions. Consider using For loop with fixed iteration count if possible.", @@ -1722,9 +1523,7 @@ "category": "TestCaseDesign", "name": "Finally block must have description and be at end", "description": "Finally blocks without descriptions make it unclear what cleanup actions are performed (major). Finally blocks not at the end of the test may not execute if test stops before reaching them (minor). Finally is meant for cleanup that always runs regardless of test outcome.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add clear description to Finally block explaining what cleanup actions are performed (e.g., 'Delete test data', 'Logout'). Move Finally block to the end of the test to ensure cleanup runs. Use Finally for critical cleanup like deleting test records or closing connections.", @@ -1739,9 +1538,7 @@ "category": "TestCaseDesign", "name": "SOQL queries specify resultListName", "description": "SOQL queries must specify resultListName to capture query results.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'resultListName' argument to store query results.", @@ -1757,9 +1554,7 @@ "category": "TestCaseDesign", "name": "Boolean values are 'true' or 'false'", "description": "Boolean value elements must contain exactly 'true' or 'false' (lowercase).", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Set boolean value to 'true' or 'false' (not 'True', 'FALSE', '1', '0', etc.).", @@ -1774,9 +1569,7 @@ "category": "TestCaseDesign", "name": "Numeric values are valid numbers", "description": "DISABLED: Rule generates false positives. Flags numeric-looking strings in SetValues without checking Salesforce field types. 
Many text fields (ZIP codes, IDs with leading zeros, year fields) legitimately store numeric strings.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 0, "recommendation": "Review if this value should be valueClass='decimal' for calculations, or valueClass='string' for text fields like IDs, ZIP codes, phone numbers. Consider field type in Salesforce.", @@ -1791,9 +1584,7 @@ "category": "TestCaseDesign", "name": "SetValues steps have namedValues container", "description": "SetValues steps must have container element.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add container inside values argument.", @@ -1809,9 +1600,7 @@ "category": "NamingConventions", "name": "SetValues namedValue elements have name attribute", "description": "Each namedValue element must have a 'name' attribute defining the variable name.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'name' attribute to each element.", @@ -1827,9 +1616,7 @@ "category": "TestCaseDesign", "name": "SetValues namedValue elements have value element", "description": "Each namedValue must contain a element defining the variable value.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add element inside each .", @@ -1845,9 +1632,7 @@ "category": "StructureAndGrouping", "name": "SetValues must not contain invalid child elements", "description": "SetValues steps must use the correct structure with containing elements. AI models sometimes hallucinate invalid elements like , , or nested incorrectly. The correct structure is: .........", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Use correct SetValues structure: VariableName...Test. 
Each variable requires one block with three children: valuePath, value, and valueScope.", @@ -1865,9 +1650,7 @@ "category": "TestCaseDesign", "name": "Variable property references must be valid", "description": "Variable property references () must reference valid properties. For SOQL query results, only fields in the SELECT clause are valid. Common hallucinations include: .size, .length, .count (use Count() function), duplicate path elements, and referencing fields not in the query.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 6, "recommendation": "For list size, use Count(variable) function. For properties, ensure the field is included in the SOQL SELECT clause or ApexReadObject generatedParameters. Remove duplicate path elements.", @@ -1884,9 +1667,7 @@ "category": "TestCaseDesign", "name": "Manual cleanup matches object creation", "description": "When autoCleanup=false, test should have ApexDeleteObject steps matching ApexCreateObject steps.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "major", "weight": 5, "recommendation": "Add ApexDeleteObject steps for created objects or set autoCleanup=true.", @@ -1901,9 +1682,7 @@ "category": "TestCaseDesign", "name": "Cleanup deletes objects in reverse order", "description": "When manually deleting objects, delete child objects before parent objects.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 2, "recommendation": "Reorder ApexDeleteObject steps: delete child records before parent records.", @@ -1918,9 +1697,7 @@ "category": "TestCaseDesign", "name": "ApexCreateObject steps specify resultIdName", "description": "ApexCreateObject should specify resultIdName to capture created record ID for later reference or cleanup.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add 'resultIdName' argument to store created record ID (e.g. 
'CreatedAccountId', 'OpportunityId').", @@ -1936,9 +1713,7 @@ "category": "ReusabilityAndCallables", "name": "Called test cases are marked as Callable", "description": "Test cases invoked via TestCaseCall must have visibility='Callable' attribute.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Set visibility='Callable' on called test case or reference a callable test.", @@ -1954,9 +1729,7 @@ "category": "TestCaseDesign", "name": "Log messages use appropriate log levels", "description": "Log steps should use appropriate log levels: INFO for general info, WARN for warnings, ERROR for errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Set logLevel argument to appropriate value: INFO, WARN, ERROR, or DEBUG.", @@ -1972,9 +1745,7 @@ "category": "TestCaseDesign", "name": "UiAssert steps specify assertion type", "description": "UiAssert steps should specify assertionType (Visible, Value, Enabled, etc.) for clarity.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Add 'assertionType' argument specifying the type of assertion.", @@ -1990,9 +1761,7 @@ "category": "TestCaseDesign", "name": "UiDoAction Set requires value argument", "description": "UiDoAction steps with interactionType='Set' must have a value argument.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add 'value' argument when using Set interaction type.", @@ -2008,9 +1777,7 @@ "category": "TestCaseDesign", "name": "Ui locator built-in actions use object binding", "description": "For Salesforce built-in actions (New, Edit, Save, Convert, etc.), uiLocator URIs must use an object binding (sf:ui:binding:object?object=...&action=...). Using the action binding form (sf:ui:binding:action?actionName=...) 
is known to cause runtime execution failures in generated tests.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use an object-binding locator like: ui:locator?name=&binding=sf%3Aui%3Abinding%3Aobject%3Fobject%3D%26action%3D (replace object/action accordingly).", @@ -2026,9 +1793,7 @@ "category": "TestCaseDesign", "name": "UiAssert fieldLocator uses object+field binding", "description": "UiAssert embedded field assertions (uiFieldAssertion) must use a fieldLocator with an object binding (sf:ui:binding:object?field=&object=) and a locator name matching the field (name=). Incorrect fieldLocator bindings can cause runtime failures or assertions targeting the wrong element.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use one of: (1) sf:ui:locator?name=&binding=sf%3Aui%3Abinding%3Aobject%3Ffield%3D%26object%3D (ensure name and field match), (2) ui:pageobject:field?field=&pageId=pageobjects., or (3) ui:locator?name= for Page Object locator references.", @@ -2044,9 +1809,7 @@ "category": "TestCaseDesign", "name": "UiAssert fieldAssertion must not wrap fieldLocator in uiLocator", "description": "UiAssert embedded field assertions (uiFieldAssertion) must use fieldLocator with a uri attribute directly. DO NOT wrap the locator in a nested element. The correct format is not . This incorrect nesting causes runtime failures.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use directly without wrapping in .", @@ -2062,9 +1825,7 @@ "category": "TestCaseDesign", "name": "UiAssert bare locator in Salesforce metadata context causes render failure", "description": "UiAssert uiFieldAssertion using a bare locator reference (ui:locator?name=X without a binding parameter) inside a UiWithScreen whose target is a Salesforce metadata context (sf:ui:target?object=...) 
will fail to render in Provar Automation. Bare locators are only valid inside Page Object contexts. In Salesforce metadata contexts, the fieldLocator must include a full binding (ui:locator?name=X&binding=sf%3Aui%3Abinding%3Aobject%3Fobject%3D...%26field%3D...). This commonly occurs when asserting button visibility (e.g., Run, Edit, Delete buttons) as uiFieldAssertions — buttons are not Salesforce fields and cannot be asserted this way.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Remove the bare-locator uiFieldAssertion for non-field elements (buttons, actions). If you need to verify a button exists, use a UiDoAction click interaction instead of a UiAssert fieldAssertion. For Salesforce field assertions, always include the full binding: . NEVER use bare ui:locator?name=X without a binding inside a Salesforce metadata UiWithScreen.", @@ -2080,9 +1841,7 @@ "category": "TestCaseDesign", "name": "UiDoAction locator URIs must use valid patterns", "description": "Validates all UiDoAction locator URI patterns including: (1) SF locators: ui:locator?name=...&binding=sf:ui:binding:object?... with required name and binding params, (2) Page Object Fields: ui:pageobject:field?field=...&pageId=pageobjects... with required field and pageId params, (3) Page Object Methods: ui:pageobject:method?pageId=...&operation=... with required pageId and operation params, (4) Lead Conversion context-aware patterns (Convert on View vs submitConvert in Dialog). Detects malformed URIs like '%3Action' which should be '%3Faction'.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "SF locators: ui:locator?name=&binding=sf%3Aui%3Abinding%3Aobject%3F. Page Object Fields: ui:pageobject:field?field=&pageId=pageobjects.. Page Object Methods: ui:pageobject:method?pageId=pageobjects.&operation=. Ensure '%3F' (?) 
precedes query params in bindings, not '%3A' (colon).", @@ -2099,9 +1858,7 @@ "category": "TestCaseDesign", "name": "UiWithScreen target URIs must use valid patterns", "description": "Validates all UiWithScreen target URI patterns including: (1) SF targets with valid parameters (object/action, lightningComponent, lightningWebComponent, auraComponent, application/tab, lookup, fieldService, action-only), (2) Page Object targets: ui:pageobject:target?pageId=pageobjects... with required pageId param starting with 'pageobjects.' prefix.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "SF Object-Action targets: sf:ui:target?object=&action=. SF Lightning targets: sf:ui:target?lightningComponent=. Page Object targets: ui:pageobject:target?pageId=pageobjects..", @@ -2118,9 +1875,7 @@ "category": "TestCaseDesign", "name": "ApexConnect reuseConnectionName should be left blank", "description": "The reuseConnectionName argument in ApexConnect steps should be left empty/blank. AI models may hallucinate invalid values like 'Reuse' which cause runtime errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Leave reuseConnectionName blank: or . Do not use string values.", @@ -2137,9 +1892,7 @@ "category": "XMLSchema", "name": "ApexConnect - Only valid argument IDs allowed", "description": "ApexConnect steps must use ONLY the 21 valid argument IDs defined in Provar schema. 
AI models commonly hallucinate plausible-sounding but invalid arguments like autoPopulateRequiredFields, assertObjectFieldsPopulated, commandTimeout which cause Provar to reject the test case.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Only use these 21 valid ApexConnect argument IDs: connectionName, resultName, resultScope, uiApplicationName, quickUiLogin, closeAllPrimaryTabs, reuseConnectionName, alreadyOpenBehaviour, autoCleanup, cleanupConnectionName, logFileLocation, connectionId, enableObjectIdLogging, privateBrowsingMode, lightningMode, username, password, securityToken, environment, webBrowser. If you don't have a value, leave the argument empty: rather than inventing values.", @@ -2178,9 +1931,7 @@ "category": "XMLSchema", "name": "ApexConnect connectionId must use valueClass='id'", "description": "The connectionId argument in ApexConnect steps MUST use valueClass='id' with a GUID format value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx), NOT valueClass='string'. Using valueClass='string' breaks connection references in Provar.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Use correct format: bce7fd3f-0f81-4c5c-ab68-c3edd44b5d1e. NEVER use valueClass=\"string\" or value=\"default\". If you don't have a specific GUID, leave the argument empty: ", @@ -2197,9 +1948,7 @@ "category": "TestCaseDesign", "name": "Wait arguments must use uiWait value class", "description": "The beforeWait, afterWait, and autoRetry arguments in UiDoAction and UiAssert steps must use class=\"uiWait\" with a uri attribute. Using valueClass=\"boolean\" is incorrect and causes runtime errors.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use uiWait class with uri: or . 
Valid uri patterns: default, NoWait, ui:wait:timed?seconds=N, ui:wait:autoRetry:timeout=N, ui:wait:auraBusy?timeout=N, ui:wait:pageload?timeout=N.", @@ -2216,9 +1965,7 @@ "category": "TestCaseDesign", "name": "Save button locator must use correct pattern", "description": "Save button locators in UiDoAction steps must use lowercase 'save' and correct URI parameter order. Common error: using 'Save' (capital S) with action before object, or using %3A instead of %3F after object?", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use correct pattern: name=save (lowercase), binding=sf%3Aui%3Abinding%3Aobject%3Fobject%3D%26action%3Dsave. Common mistakes: (1) Capital S in 'Save', (2) action before object in binding, (3) using %3Action instead of %3Faction, (4) using %3A instead of %3F after 'object?'", @@ -2235,9 +1982,7 @@ "category": "TestCaseDesign", "name": "UiWithScreen target uses invalid action value", "description": "The action parameter in sf:ui:target URIs must use valid Provar action names. AI models commonly hallucinate 'action=Home' when the correct value is 'action=ObjectHome'.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Use valid action values: ObjectHome (for object list/home), New, View, Edit, List, Convert, GlobalSearch, QuickAction, etc. Common fixes: Home→ObjectHome, Create→New, Update→Edit, Details→View.", @@ -2254,9 +1999,7 @@ "category": "TestCaseDesign", "name": "Sleep duration should be under 5 seconds", "description": "Fixed sleep durations should be kept short (<5 seconds). 
Prefer WaitFor with condition over fixed Sleep.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Use WaitFor with condition instead of Sleep, or reduce sleep duration to <5 seconds.", @@ -2273,9 +2016,7 @@ "category": "StructureAndGrouping", "name": "Finally block should be at end of test", "description": "Finally blocks should be placed at the end of the test to ensure cleanup always executes.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Move Finally block to end of test case.", @@ -2291,9 +2032,7 @@ "category": "StructureAndGrouping", "name": "BDD scenario should start with Given", "description": "BDD test cases should start with Given step to establish preconditions.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Start BDD scenario with Given step.", @@ -2308,9 +2047,7 @@ "category": "StructureAndGrouping", "name": "BDD steps should follow logical order", "description": "BDD steps should follow Given->When->Then order (with And/But as connectors).", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Reorder BDD steps to follow Given->When->Then pattern.", @@ -2325,9 +2062,7 @@ "category": "StructureAndGrouping", "name": "Limit And/But chain length", "description": "Chains of And/But steps should be limited to 3-5 for readability.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "info", "weight": 1, "recommendation": "Break long And/But chains into separate scenarios.", @@ -2343,9 +2078,7 @@ "category": "TestCaseDesign", "name": "ApexExecute code should be valid Apex syntax", "description": "Anonymous Apex code blocks should be syntactically valid.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Validate Apex 
syntax before execution. Test in Developer Console first.", @@ -2361,9 +2094,7 @@ "category": "TestCaseDesign", "name": "ApexBulk should be used for large data volumes", "description": "Use ApexBulk for operations with more than 200 records.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Use ApexBulk instead of individual CRUD operations for >200 records.", @@ -2378,9 +2109,7 @@ "category": "TestCaseDesign", "name": "Prefer UiWithScreen over UiNavigate for Salesforce", "description": "For Salesforce navigation, prefer UiWithScreen over direct URL navigation with UiNavigate.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Use UiWithScreen for Salesforce screens instead of UiNavigate.", @@ -2396,9 +2125,7 @@ "category": "TestCaseDesign", "name": "Verify fields after UiFill", "description": "UiFill should be followed by UiAssert to verify all fields were filled correctly.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Add UiAssert steps after UiFill to verify field values.", @@ -2414,9 +2141,7 @@ "category": "TestCaseDesign", "name": "UiHandleAlert should capture alert text", "description": "When handling alerts with GetText action, store the text in a result variable.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Set resultName when using GetText action on UiHandleAlert.", @@ -2432,9 +2157,7 @@ "category": "TestCaseDesign", "name": "Replace searchString should not be empty", "description": "Replace step searchString parameter should not be empty string.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Provide non-empty searchString for Replace operation.", @@ -2450,9 +2173,7 @@ "category": "TestCaseDesign", "name": "Split delimiter should not be empty", 
"description": "Split step delimiter parameter should not be empty string.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Provide non-empty delimiter for Split operation.", @@ -2468,9 +2189,7 @@ "category": "TestCaseDesign", "name": "Match regex pattern should be valid", "description": "When using isRegex=true, pattern must be valid regular expression.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Validate regex pattern syntax before using in Match step.", @@ -2486,19 +2205,14 @@ "category": "TestCaseDesign", "name": "DbDelete and DbUpdate should have WHERE clause", "description": "Database delete and update operations should include WHERE clause to avoid affecting all records.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add WHERE clause to DbDelete/DbUpdate to limit affected records.", "check": { "type": "dbWhereClause", "target": "step", - "apiIds": [ - "com.provar.plugins.bundled.apis.db.DbDelete", - "com.provar.plugins.bundled.apis.db.DbUpdate" - ] + "apiIds": ["com.provar.plugins.bundled.apis.db.DbDelete", "com.provar.plugins.bundled.apis.db.DbUpdate"] }, "source": "Database Testing Best Practices" }, @@ -2507,9 +2221,7 @@ "category": "TestCaseDesign", "name": "RestRequest method should be valid HTTP method", "description": "RestRequest method must be one of: GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Use valid HTTP method (GET, POST, PUT, DELETE, PATCH).", @@ -2517,15 +2229,7 @@ "type": "restHttpMethod", "target": "step", "apiId": "com.provar.plugins.bundled.apis.restservice.RestRequest", - "validMethods": [ - "GET", - "POST", - "PUT", - "DELETE", - "PATCH", - "HEAD", - "OPTIONS" - ] + "validMethods": ["GET", "POST", "PUT", "DELETE", "PATCH", 
"HEAD", "OPTIONS"] }, "source": "REST API Testing Best Practices" }, @@ -2534,9 +2238,7 @@ "category": "TestCaseDesign", "name": "POST/PUT/PATCH should have request body", "description": "RestRequest with POST, PUT, or PATCH method should include request body.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Add request body for POST/PUT/PATCH operations.", @@ -2552,9 +2254,7 @@ "category": "TestCaseDesign", "name": "Validate REST response status", "description": "RestRequest should be followed by assertion to validate response status code.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Add Assert step to validate REST response status code.", @@ -2570,9 +2270,7 @@ "category": "TestCaseDesign", "name": "SOAP request body should be well-formed XML", "description": "WebServiceRequest requestBody must be valid XML for SOAP operations.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Validate SOAP XML syntax before execution.", @@ -2588,9 +2286,7 @@ "category": "TestCaseDesign", "name": "AIAgentSession requires WebConnect first", "description": "AIAgentSession step must be preceded by WebConnect step.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add WebConnect step before AIAgentSession.", @@ -2606,9 +2302,7 @@ "category": "TestCaseDesign", "name": "AIAgentConversation requires valid session", "description": "AIAgentConversation must reference sessionID from AIAgentSession.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure AIAgentSession is called first and sessionID is stored.", @@ -2624,9 +2318,7 @@ "category": "TestCaseDesign", "name": "GenerateUtterance count should be reasonable", "description": "GenerateUtterance count parameter should be 
between 1-100 for performance.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Set GenerateUtterance count between 1-100.", @@ -2644,9 +2336,7 @@ "category": "TestCaseDesign", "name": "ImageValidator confidence should be 0.0-1.0", "description": "ImageValidator confidenceThreshold must be between 0.0 and 1.0.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Set confidenceThreshold between 0.0 and 1.0.", @@ -2664,9 +2354,7 @@ "category": "TestCaseDesign", "name": "ExtractSalesforceLayout before AssertSalesforceLayout", "description": "AssertSalesforceLayout should be preceded by ExtractSalesforceLayout to create baseline. Otherwise ensure you have a compatible data source to compare to.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 2, "recommendation": "Run ExtractSalesforceLayout first to create baseline layout file.", @@ -2682,9 +2370,7 @@ "category": "TestCaseDesign", "name": "ConvertLead status must be valid", "description": "ConvertLead convertedStatus must be a valid lead conversion status.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Use valid lead conversion status (Converted, Qualified, etc.).", @@ -2700,9 +2386,7 @@ "category": "TestCaseDesign", "name": "Read dataUrl should be valid file path", "description": "Read step dataUrl must point to existing Excel/CSV file.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Verify file exists before Read operation.", @@ -2718,9 +2402,7 @@ "category": "TestCaseDesign", "name": "Write dataUrl should be writable", "description": "Write step dataUrl directory must be writable.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Ensure output directory exists and 
is writable.", @@ -2736,9 +2418,7 @@ "category": "TestCaseDesign", "name": "Subscribe before ReceiveMessage", "description": "ReceiveMessage should be preceded by Subscribe to topic/channel.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Add Subscribe step before ReceiveMessage.", @@ -2754,9 +2434,7 @@ "category": "TestCaseDesign", "name": "ReceiveMessage timeout should be reasonable", "description": "ReceiveMessage timeout should be between 5-60 seconds.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "info", "weight": 1, "recommendation": "Set timeout between 5-60 seconds.", @@ -2774,10 +2452,7 @@ "category": "StructureAndGrouping", "name": "All arguments must have value elements", "description": "Every element must contain a child element, even if empty. Missing elements prevent test cases from loading in Provar.", - "appliesTo": [ - "TestCase", - "Step" - ], + "appliesTo": ["TestCase", "Step"], "severity": "critical", "weight": 10, "recommendation": "Add (or appropriate type) to all elements.", @@ -2793,10 +2468,7 @@ "category": "StructureAndGrouping", "name": "valueClass attributes must use lowercase", "description": "All valueClass attributes must use lowercase values (e.g., 'boolean' not 'Boolean', 'string' not 'String'). Incorrect casing prevents test cases from loading in Provar.", - "appliesTo": [ - "TestCase", - "Step" - ], + "appliesTo": ["TestCase", "Step"], "severity": "critical", "weight": 10, "recommendation": "Change all valueClass attributes to lowercase: boolean, string, decimal, integer, date, datetime, variable, compound, funcCall, value, valueList.", @@ -2811,10 +2483,7 @@ "category": "StructureAndGrouping", "name": "Boolean values must use lowercase", "description": "Boolean values in elements must be lowercase 'true' or 'false', not 'True', 'False', 'TRUE', or 'FALSE'. 
Incorrect casing prevents test cases from loading in Provar.", - "appliesTo": [ - "TestCase", - "Step" - ], + "appliesTo": ["TestCase", "Step"], "severity": "critical", "weight": 10, "recommendation": "Change all boolean values to lowercase: true, false.", @@ -2829,9 +2498,7 @@ "category": "StructureAndGrouping", "name": "Test case root element should not have unknown attributes", "description": "The root element should only contain known attributes: guid, id, name, visibility, registryId, failureBehaviour. Unknown attributes may prevent test cases from loading in Provar.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 3, "recommendation": "Remove unknown attributes from the root element.", @@ -2846,9 +2513,7 @@ "category": "TestCaseDesign", "name": "Test case should not be excessively long", "description": "Test cases with more than 150 steps become difficult to maintain, debug, and understand. Consider breaking long tests into smaller, focused test cases or using callable test cases.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "minor", "weight": 3, "recommendation": "Break test into smaller test cases or extract common logic into callable test cases.", @@ -2863,10 +2528,7 @@ "category": "TestCaseDesign", "name": "Picklist values should match Salesforce metadata", "description": "Picklist values used in test data should match the valid values defined in Salesforce. Common hallucinated values like 'Active' for Campaign.Status or 'Open' for Opportunity.StageName are often invalid and will cause test failures.", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "major", "weight": 7, "recommendation": "Use only picklist values provided in the metadata context. For Campaign.Status, valid values are typically: Planned, In Progress, Completed, Aborted. For Opportunity.StageName, check your org's sales process stages. 
For Lead.Status, check your org's lead process. Never assume generic values like 'Active', 'Inactive', 'Open', 'Closed' unless explicitly confirmed.", @@ -2890,17 +2552,43 @@ "category": "StructureAndGrouping", "name": "Value elements must use valid class attribute", "description": "The 'class' attribute on elements must be one of the valid Provar value classes. Invalid class values like 'null' cause runtime errors. Valid classes are: value, variable, compound, funcCall, valueList, uiWait, uiLocator, uiTarget, uiInteraction, restTarget, excelTarget, csvTarget, namedValues, url, template, and expression operators (add, sub, mult, div, eq, ne, gt, lt, ge, le, and, or, match).", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "critical", "weight": 10, "recommendation": "Use a valid class attribute. For literal values use class='value' with appropriate valueClass (string, boolean, decimal, id, date, dateTime). For variables use class='variable'. For empty/optional arguments, omit the element entirely: ", "check": { "type": "invalidValueClass", "target": "step", - "validClasses": ["value", "variable", "compound", "funcCall", "valueList", "uiWait", "uiLocator", "uiTarget", "uiInteraction", "restTarget", "excelTarget", "csvTarget", "namedValues", "url", "template", "add", "sub", "mult", "div", "eq", "ne", "gt", "lt", "ge", "le", "and", "or", "match"], + "validClasses": [ + "value", + "variable", + "compound", + "funcCall", + "valueList", + "uiWait", + "uiLocator", + "uiTarget", + "uiInteraction", + "restTarget", + "excelTarget", + "csvTarget", + "namedValues", + "url", + "template", + "add", + "sub", + "mult", + "div", + "eq", + "ne", + "gt", + "lt", + "ge", + "le", + "and", + "or", + "match" + ], "validValueClasses": ["string", "boolean", "decimal", "id", "date", "dateTime"] }, "notes": "Based on corpus analysis of 1451 test files with 329,424 elements. Common AI hallucination: class='null' (never valid). 
Empty arguments should have no child.", @@ -2911,16 +2599,22 @@ "category": "StructureAndGrouping", "name": "UiAssert steps must include all required arguments", "description": "UiAssert steps must include columnAssertions, pageAssertions, resultScope, captureAfter, beforeWait, and autoRetry arguments. Based on corpus analysis of 4,577 UiAssert instances, 100% include these arguments (even if empty). Missing these required arguments causes Provar validation failures at runtime.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 8, "recommendation": "Include all required arguments in UiAssert steps. Use empty value lists for unused assertions: . For resultScope use 'Test'. For captureAfter use 'false'. For beforeWait and autoRetry, include empty argument tags: ", "check": { "type": "uiAssertMissingArguments", "target": "step", - "requiredArguments": ["fieldAssertions", "columnAssertions", "pageAssertions", "resultScope", "captureAfter", "beforeWait", "autoRetry"], + "requiredArguments": [ + "fieldAssertions", + "columnAssertions", + "pageAssertions", + "resultScope", + "captureAfter", + "beforeWait", + "autoRetry" + ], "recommendedArguments": ["resultName"] }, "notes": "Based on corpus analysis of 4,577 UiAssert instances across 694 test files. All instances include these arguments. AI commonly omits columnAssertions, pageAssertions, resultScope, beforeWait, and autoRetry.", @@ -2931,9 +2625,7 @@ "category": "StructureAndGrouping", "name": "UiAssert steps must NOT contain generatedParameters", "description": "UiAssert steps should NOT contain element. Based on corpus analysis of 4,577 UiAssert instances, 0% contain generatedParameters. This element is 100% hallucinated by AI and causes Provar validation failures.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Remove the entire section from UiAssert steps. 
Unlike UiDoAction steps which use generatedParameters for field metadata, UiAssert steps never use this element. The assertion configuration is entirely within the arguments element.", @@ -2949,9 +2641,7 @@ "category": "LocatorPatterns", "name": "UI binding parameter order must have object= first", "description": "In UI binding URIs, the object= parameter MUST come FIRST, followed by field= or action=. Wrong order causes 'Unknown control' errors in Provar at runtime.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "critical", "weight": 10, "recommendation": "Correct the binding parameter order. Use 'object?object=ObjectName&action=ActionName' or 'object?object=ObjectName&field=FieldName'. Never put action= or field= before object=.", @@ -2979,9 +2669,7 @@ "category": "TestCaseDesign", "name": "UiDoAction/UiAssert fields should exist in Salesforce metadata", "description": "When Salesforce object metadata is provided in the context, UI operations (UiDoAction field sets, UiAssert field assertions) should only reference fields that exist in the metadata. Fields mentioned in user story but not present in Salesforce metadata may cause 'Unknown control' errors at runtime. This validation catches hallucinated or non-existent fields before test execution.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Only use fields that exist in the Salesforce org metadata. If a field from the user story shows 'Unknown control' in Provar, either: (1) the field doesn't exist in this org, (2) the field API name is different, or (3) the field isn't on the page layout. 
Check the org's metadata or remove the field operation from the test.", @@ -3001,9 +2689,7 @@ "category": "TestCaseDesign", "name": "Page Object locator references non-existent field", "description": "When Page Objects are provided, ui:pageobject:field locators must reference fields (WebElement variables) that actually exist in the corresponding Page Object Java class. AI models commonly hallucinate field names that are not defined in the provided Page Objects, causing 'Unknown control' errors at runtime.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 5, "recommendation": "Only reference field names that exist in the provided Page Object definitions. Check the Page Object class for available WebElement variables. If the field you need is not in the Page Object, either: (1) use SF metadata binding (sf:ui:binding:object) instead, or (2) ensure the Page Object includes the required field. Available fields are listed in the Page Object section of the generation context.", @@ -3023,9 +2709,7 @@ "category": "XMLSchema", "name": "funcCall id must be a valid Provar function", "description": "When using , the function name must be one of Provar's known built-in functions. Unknown function names cause runtime errors as Provar cannot evaluate them. Common hallucinations include JavaScript-style functions like 'concat', 'toString', 'parseInt' instead of Provar's actual functions.", - "appliesTo": [ - "TestCase" - ], + "appliesTo": ["TestCase"], "severity": "major", "weight": 6, "recommendation": "Use only valid Provar function names: Count, DateAdd, DateFormat, DateParse, GetEnvironmentVariable, GetSelectedEnvironment, IsSorted, Not, NumberFormat, Round, StringNormalize, StringReplace, StringTrim, TestCaseErrors, TestCaseName, TestCaseOutcome, TestCasePath, TestCaseSuccessful, TestRunErrors, UniqueId. 
NOTE: Concatenate, PadLeft, PadRight, Substring are NOT valid - use valueList for string building.", @@ -3057,9 +2741,7 @@ "category": "TestCaseDesign", "name": "UiAssert must use compound fields for component field assertions", "description": "Salesforce compound fields (Name=FirstName+LastName, BillingAddress=BillingStreet+BillingCity+BillingState+BillingPostalCode+BillingCountry, ShippingAddress, MailingAddress) are displayed as single fields in the UI View screen but set as individual component fields in UiDoAction. UiAssert steps MUST assert the compound field (e.g., 'Name', 'BillingAddress') using a compound value that combines the individual component variables, NOT assert the individual component fields directly which don't exist in the View UI.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 6, "recommendation": "For Name field: Assert 'Name' using . For Address fields: Assert 'BillingAddress' or 'ShippingAddress' as the compound field, not individual BillingStreet/BillingCity etc. 
The fieldLocator should reference the compound field name (e.g., field=Name, field=BillingAddress).", @@ -3069,9 +2751,39 @@ "apiId": "com.provar.plugins.forcedotcom.core.ui.UiAssert", "compoundFields": { "Name": ["FirstName", "LastName", "Salutation", "MiddleName", "Suffix"], - "BillingAddress": ["BillingStreet", "BillingCity", "BillingState", "BillingStateCode", "BillingPostalCode", "BillingCountry", "BillingCountryCode", "BillingLatitude", "BillingLongitude"], - "ShippingAddress": ["ShippingStreet", "ShippingCity", "ShippingState", "ShippingStateCode", "ShippingPostalCode", "ShippingCountry", "ShippingCountryCode", "ShippingLatitude", "ShippingLongitude"], - "MailingAddress": ["MailingStreet", "MailingCity", "MailingState", "MailingStateCode", "MailingPostalCode", "MailingCountry", "MailingCountryCode", "MailingLatitude", "MailingLongitude"] + "BillingAddress": [ + "BillingStreet", + "BillingCity", + "BillingState", + "BillingStateCode", + "BillingPostalCode", + "BillingCountry", + "BillingCountryCode", + "BillingLatitude", + "BillingLongitude" + ], + "ShippingAddress": [ + "ShippingStreet", + "ShippingCity", + "ShippingState", + "ShippingStateCode", + "ShippingPostalCode", + "ShippingCountry", + "ShippingCountryCode", + "ShippingLatitude", + "ShippingLongitude" + ], + "MailingAddress": [ + "MailingStreet", + "MailingCity", + "MailingState", + "MailingStateCode", + "MailingPostalCode", + "MailingCountry", + "MailingCountryCode", + "MailingLatitude", + "MailingLongitude" + ] } }, "examples": { @@ -3092,9 +2804,7 @@ "category": "TestCaseDesign", "name": "UiDoAction lookup fields should use Name values, not IDs", "description": "When setting lookup/reference fields via UiDoAction (UI automation), use the record Name (text value the user sees) instead of the record Id. The Salesforce UI expects display values like 'ABC Corp' for Account lookups, not 18-character IDs like '001xx000003DGWaAAO'. Using IDs in UI fields causes lookup failures. 
Note: Apex CRUD operations (ApexCreateObject, ApexUpdateObject) correctly use IDs for lookup fields.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "major", "weight": 6, "recommendation": "For UiDoAction on lookup fields (AccountId, ContactId, OwnerId, etc.): 1) Create a variable for the record Name BEFORE creating the record (e.g., AccountName = 'ABC Corp'), 2) Use that Name variable in UiDoAction for lookups, 3) Only use ID variables for Apex CRUD operations or sfUiTargetObjectId navigation. Example: Set AccountName='ABC Corp' via SetValues, then use {AccountName} for UiDoAction on AccountId field.", @@ -3124,20 +2834,25 @@ "category": "TestCaseDesign", "name": "Date/DateTime assertions should use proper format functions", "description": "Salesforce Date and DateTime fields are stored in specific formats: DateTime uses ISO 8601 format (2026-01-14T05:36:12.000+0000) and Date uses yyyy-MM-dd format (2026-01-14). When asserting these fields in UI or API tests, raw date strings may cause assertion failures due to format mismatches, timezone differences, or precision issues. Use Provar's date functions (DateFormat, DateParse, DateAdd, TODAY, NOW) for reliable date comparisons.", - "appliesTo": [ - "Step" - ], + "appliesTo": ["Step"], "severity": "minor", "weight": 4, "recommendation": "For date assertions: 1) Use DateFormat() to format dates consistently: DateFormat(variable, 'yyyy-MM-dd', 'GMT'), 2) Use DateParse() to parse date strings with known formats, 3) Use TODAY for current date comparisons (yyyy-MM-dd format), 4) Use NOW for current datetime (yyyy-MM-dd HH:mm:ss.SSS format), 5) Use DateAdd() for date calculations. Always specify timezone for DateTime comparisons. 
Example: Assert CloseDate equals DateFormat(ExpectedDate, 'yyyy-MM-dd').", "check": { "type": "dateFormatAssertion", "target": "step", - "apiIds": [ - "com.provar.plugins.forcedotcom.core.ui.UiAssert", - "com.provar.plugins.bundled.apis.AssertValues" + "apiIds": ["com.provar.plugins.forcedotcom.core.ui.UiAssert", "com.provar.plugins.bundled.apis.AssertValues"], + "dateFieldPatterns": [ + "Date$", + "DateTime$", + "^Close", + "^Start", + "^End", + "^Created", + "^LastModified", + "^Birth", + "^Expir" ], - "dateFieldPatterns": ["Date$", "DateTime$", "^Close", "^Start", "^End", "^Created", "^LastModified", "^Birth", "^Expir"], "sfDateTimeFormat": "yyyy-MM-dd'T'HH:mm:ss.SSSZ", "sfDateFormat": "yyyy-MM-dd" }, @@ -3162,10 +2877,7 @@ "category": "XMLSchema", "name": "valueClass='date' requires epoch timestamp, not date string", "description": "When using valueClass='date' or valueClass='dateTime' in Provar test cases, the value MUST be an epoch timestamp in milliseconds (a numeric value like 1736899200000), NOT an ISO date string like '2025-01-15' or '2025-01-15T00:00:00.000Z'. Using string date formats with valueClass='date' causes the test case to fail loading in Provar IDE. This is a critical XML schema violation that completely breaks the test case.", - "appliesTo": [ - "Step", - "TestCase" - ], + "appliesTo": ["Step", "TestCase"], "severity": "critical", "weight": 10, "recommendation": "Either: 1) Use valueClass='string' if you want to specify dates as strings like '2025-01-15', OR 2) Use valueClass='date' with epoch timestamp in milliseconds (e.g., 1736899200000 for 2025-01-15). For ApexCreateObject/ApexUpdateObject date fields, use valueClass='string' with the date in 'YYYY-MM-DD' format. 
To convert: new Date('2025-01-15').getTime() = 1736899200000.", @@ -3189,4 +2901,4 @@ "source": "Corpus analysis: All valueClass='date' values are epoch timestamps; string dates cause Provar IDE load failures" } ] -} \ No newline at end of file +} diff --git a/src/mcp/server.ts b/src/mcp/server.ts index 88adf9c..27b889f 100644 --- a/src/mcp/server.ts +++ b/src/mcp/server.ts @@ -32,6 +32,8 @@ import { registerAllAntTools } from './tools/antTools.js'; import { registerAllRcaTools } from './tools/rcaTools.js'; import { registerAllTestPlanTools } from './tools/testPlanTools.js'; import { registerAllNitroXTools } from './tools/nitroXTools.js'; +import { registerAllTestCaseStepTools } from './tools/testCaseStepTools.js'; +import { registerAllConnectionTools } from './tools/connectionTools.js'; import { registerAllPrompts } from './prompts/index.js'; export interface ServerConfig { @@ -77,9 +79,11 @@ export function createProvarMcpServer(config: ServerConfig): McpServer { registerAllAutomationTools(server, config); registerAllDefectTools(server); registerAllAntTools(server, config); - registerAllRcaTools(server); + registerAllRcaTools(server, config); registerAllTestPlanTools(server, config); registerAllNitroXTools(server, config); + registerAllTestCaseStepTools(server, config); + registerAllConnectionTools(server, config); // ── Provar prompts ─────────────────────────────────────────────────────────── registerAllPrompts(server); diff --git a/src/mcp/tools/antTools.ts b/src/mcp/tools/antTools.ts index 8af0117..1780173 100644 --- a/src/mcp/tools/antTools.ts +++ b/src/mcp/tools/antTools.ts @@ -23,9 +23,7 @@ const FilesetSchema = z.object({ id: z .string() .optional() - .describe( - 'Fileset id — use "testplan" for plan-based runs, "testcases" for test case runs, omit for default' - ), + .describe('Fileset id — use "testplan" for plan-based runs, "testcases" for test case runs, omit for default'), includes: z .array(z.string()) .optional() @@ -35,9 +33,7 @@ const 
FilesetSchema = z.object({ }); const PlanFeatureSchema = z.object({ - name: z - .enum(['PDF', 'PIECHART', 'EMAIL', 'JUNIT']) - .describe('Feature name (PDF, PIECHART, EMAIL, JUNIT)'), + name: z.enum(['PDF', 'PIECHART', 'EMAIL', 'JUNIT']).describe('Feature name (PDF, PIECHART, EMAIL, JUNIT)'), type: z.enum(['OUTPUT', 'NOTIFICATION']).describe('Feature type'), enabled: z.boolean().describe('Whether this feature is enabled'), }); @@ -88,9 +84,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo project_path: z .string() .default('..') - .describe( - 'Path to the Provar test project root. Defaults to ".." (parent of the ANT folder).' - ), + .describe('Path to the Provar test project root. Defaults to ".." (parent of the ANT folder).'), results_path: z .string() .default('../ANT/Results') @@ -98,9 +92,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo project_cache_path: z .string() .optional() - .describe( - 'Path to the .provarCaches directory. Defaults to "../../.provarCaches" relative to the ANT folder.' - ), + .describe('Path to the .provarCaches directory. Defaults to "../../.provarCaches" relative to the ANT folder.'), license_path: z .string() .optional() @@ -130,14 +122,8 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo .string() .default('Full Screen') .describe('Browser window configuration (e.g. "Full Screen").'), - web_browser_provider_name: z - .string() - .default('Desktop') - .describe('Browser provider name (e.g. "Desktop").'), - web_browser_device_name: z - .string() - .default('Full Screen') - .describe('Browser device name (e.g. "Full Screen").'), + web_browser_provider_name: z.string().default('Desktop').describe('Browser provider name (e.g. "Desktop").'), + web_browser_device_name: z.string().default('Full Screen').describe('Browser device name (e.g. 
"Full Screen").'), test_environment: z .string() .default('') @@ -182,10 +168,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo .describe( 'When true, the ANT build does not fail even if tests fail. Useful for CI pipelines that collect results separately.' ), - invoke_test_run_monitor: z - .boolean() - .default(true) - .describe('Enable the Provar test run monitor.'), + invoke_test_run_monitor: z.boolean().default(true).describe('Enable the Provar test run monitor.'), // ── Secrets / security ────────────────────────────────────────────────── secrets_password: z @@ -202,10 +185,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo ), // ── Test Cycle ────────────────────────────────────────────────────────── - test_cycle_path: z - .string() - .optional() - .describe('Path to a TestCycle folder (used with test cycle reporting).'), + test_cycle_path: z.string().optional().describe('Path to a TestCycle folder (used with test cycle reporting).'), test_cycle_run_type: z .enum(['ALL', 'FAILED', 'NEW']) .optional() @@ -233,14 +213,8 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo .string() .optional() .describe('Where to write the build.xml file (returned in response). 
Required when dry_run=false.'), - overwrite: z - .boolean() - .default(false) - .describe('Overwrite output_path if the file already exists.'), - dry_run: z - .boolean() - .default(true) - .describe('true = return XML only (default); false = write to output_path.'), + overwrite: z.boolean().default(false).describe('Overwrite output_path if the file already exists.'), + dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path.'), }, (input) => { const requestId = makeRequestId(); @@ -296,7 +270,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo } catch (err: unknown) { const error = err as Error & { code?: string }; const errResult = makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'GENERATE_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 'GENERATE_ERROR', error.message, requestId, false @@ -320,14 +294,8 @@ export function registerAntValidate(server: McpServer, config: ServerConfig): vo 'and at least one child. 
Returns is_valid, issues list, and a validity_score.', ].join(' '), { - content: z - .string() - .optional() - .describe('XML content to validate directly'), - file_path: z - .string() - .optional() - .describe('Path to the build.xml file to validate'), + content: z.string().optional().describe('XML content to validate directly'), + file_path: z.string().optional().describe('Path to the build.xml file to validate'), }, ({ content, file_path }) => { const requestId = makeRequestId(); @@ -467,9 +435,7 @@ function buildAntXml(input: AntGenerateInput): string { `\t` ); } else { - lines.push( - '\t' - ); + lines.push('\t'); } if (input.test_cycle_path) { @@ -536,9 +502,7 @@ function buildAntXml(input: AntGenerateInput): string { // ── Plan features ───────────────────────────────────────────────────────────── if (input.plan_features && input.plan_features.length > 0) { for (const pf of input.plan_features) { - lines.push( - `\t\t\t` - ); + lines.push(`\t\t\t`); } lines.push(''); } @@ -547,7 +511,11 @@ function buildAntXml(input: AntGenerateInput): string { if (input.email_properties) { const ep = input.email_properties; lines.push( - `\t\t\t` + `\t\t\t` ); } @@ -589,10 +557,7 @@ export interface AntValidationResult { issues: ValidationIssue[]; } -const REQUIRED_TASKDEF_CLASSNAMES = [ - 'com.provar.testrunner.ant.CompileTask', - 'com.provar.testrunner.ant.RunnerTask', -]; +const REQUIRED_TASKDEF_CLASSNAMES = ['com.provar.testrunner.ant.CompileTask', 'com.provar.testrunner.ant.RunnerTask']; const VALID_BROWSERS = ['Chrome', 'Chrome_Headless', 'Firefox', 'Edge', 'Edge_Legacy', 'Safari', 'IE']; const VALID_CACHE = ['Reuse', 'Refresh', 'Reload']; @@ -667,21 +632,55 @@ function validateProjectStructure( return target; } -function validateRtcEnumAttrs(rtc: Record, webBrowser: string | null, issues: ValidationIssue[]): void { +function validateRtcEnumAttrs( + rtc: Record, + webBrowser: string | null, + issues: ValidationIssue[] +): void { if (webBrowser && 
!VALID_BROWSERS.includes(webBrowser)) { - issues.push({ rule_id: 'ANT_030', severity: 'WARNING', message: `webBrowser "${webBrowser}" is not a recognised value. Expected one of: ${VALID_BROWSERS.join(', ')}.`, applies_to: 'Run-Test-Case', suggestion: `Use one of the supported browser values: ${VALID_BROWSERS.join(', ')}.` }); + issues.push({ + rule_id: 'ANT_030', + severity: 'WARNING', + message: `webBrowser "${webBrowser}" is not a recognised value. Expected one of: ${VALID_BROWSERS.join(', ')}.`, + applies_to: 'Run-Test-Case', + suggestion: `Use one of the supported browser values: ${VALID_BROWSERS.join(', ')}.`, + }); } const metadataCache = rtc['@_salesforceMetadataCache'] as string | undefined; if (metadataCache && !VALID_CACHE.includes(metadataCache)) { - issues.push({ rule_id: 'ANT_031', severity: 'WARNING', message: `salesforceMetadataCache "${metadataCache}" is not a recognised value. Expected one of: ${VALID_CACHE.join(', ')}.`, applies_to: 'Run-Test-Case', suggestion: `Use one of: ${VALID_CACHE.join(', ')}.` }); + issues.push({ + rule_id: 'ANT_031', + severity: 'WARNING', + message: `salesforceMetadataCache "${metadataCache}" is not a recognised value. Expected one of: ${VALID_CACHE.join( + ', ' + )}.`, + applies_to: 'Run-Test-Case', + suggestion: `Use one of: ${VALID_CACHE.join(', ')}.`, + }); } const testOutputLevel = rtc['@_testOutputlevel'] as string | undefined; if (testOutputLevel && !VALID_OUTPUT_LEVELS.includes(testOutputLevel)) { - issues.push({ rule_id: 'ANT_032', severity: 'WARNING', message: `testOutputlevel "${testOutputLevel}" is not a recognised value. Expected one of: ${VALID_OUTPUT_LEVELS.join(', ')}.`, applies_to: 'Run-Test-Case', suggestion: `Use one of: ${VALID_OUTPUT_LEVELS.join(', ')}.` }); + issues.push({ + rule_id: 'ANT_032', + severity: 'WARNING', + message: `testOutputlevel "${testOutputLevel}" is not a recognised value. 
Expected one of: ${VALID_OUTPUT_LEVELS.join( + ', ' + )}.`, + applies_to: 'Run-Test-Case', + suggestion: `Use one of: ${VALID_OUTPUT_LEVELS.join(', ')}.`, + }); } const disposition = rtc['@_resultsPathDisposition'] as string | undefined; if (disposition && !VALID_DISPOSITIONS.includes(disposition)) { - issues.push({ rule_id: 'ANT_033', severity: 'WARNING', message: `resultsPathDisposition "${disposition}" is not a recognised value. Expected one of: ${VALID_DISPOSITIONS.join(', ')}.`, applies_to: 'Run-Test-Case', suggestion: `Use one of: ${VALID_DISPOSITIONS.join(', ')}.` }); + issues.push({ + rule_id: 'ANT_033', + severity: 'WARNING', + message: `resultsPathDisposition "${disposition}" is not a recognised value. Expected one of: ${VALID_DISPOSITIONS.join( + ', ' + )}.`, + applies_to: 'Run-Test-Case', + suggestion: `Use one of: ${VALID_DISPOSITIONS.join(', ')}.`, + }); } } @@ -693,22 +692,52 @@ function validateRunTestCase(rtc: Record, issues: ValidationIss const testEnvironment = (rtc['@_testEnvironment'] as string | undefined) ?? null; if (!provarHome) { - issues.push({ rule_id: 'ANT_021', severity: 'ERROR', message: ' missing required "provarHome" attribute.', applies_to: 'Run-Test-Case', suggestion: 'Add provarHome="${provar.home}" to .' }); + issues.push({ + rule_id: 'ANT_021', + severity: 'ERROR', + message: ' missing required "provarHome" attribute.', + applies_to: 'Run-Test-Case', + suggestion: 'Add provarHome="${provar.home}" to .', + }); } if (!projectPath) { - issues.push({ rule_id: 'ANT_022', severity: 'ERROR', message: ' missing required "projectPath" attribute.', applies_to: 'Run-Test-Case', suggestion: 'Add projectPath="${testproject.home}" to .' 
}); + issues.push({ + rule_id: 'ANT_022', + severity: 'ERROR', + message: ' missing required "projectPath" attribute.', + applies_to: 'Run-Test-Case', + suggestion: 'Add projectPath="${testproject.home}" to .', + }); } if (!resultsPath) { - issues.push({ rule_id: 'ANT_023', severity: 'ERROR', message: ' missing required "resultsPath" attribute.', applies_to: 'Run-Test-Case', suggestion: 'Add resultsPath="${testproject.results}" to .' }); + issues.push({ + rule_id: 'ANT_023', + severity: 'ERROR', + message: ' missing required "resultsPath" attribute.', + applies_to: 'Run-Test-Case', + suggestion: 'Add resultsPath="${testproject.results}" to .', + }); } validateRtcEnumAttrs(rtc, webBrowser, issues); const filesets = (rtc['fileset'] as Array> | undefined) ?? []; if (filesets.length === 0) { - issues.push({ rule_id: 'ANT_040', severity: 'ERROR', message: ' has no children — no tests will be selected.', applies_to: 'Run-Test-Case', suggestion: 'Add at least one pointing to your tests or plans folder.' }); + issues.push({ + rule_id: 'ANT_040', + severity: 'ERROR', + message: ' has no children — no tests will be selected.', + applies_to: 'Run-Test-Case', + suggestion: 'Add at least one pointing to your tests or plans folder.', + }); } for (const [i, fsEntry] of filesets.entries()) { if (!(fsEntry['@_dir'] as string | undefined)) { - issues.push({ rule_id: 'ANT_041', severity: 'ERROR', message: ` at index ${i} is missing required "dir" attribute.`, applies_to: 'fileset', suggestion: 'Add dir="..." to each element.' }); + issues.push({ + rule_id: 'ANT_041', + severity: 'ERROR', + message: ` at index ${i} is missing required "dir" attribute.`, + applies_to: 'fileset', + suggestion: 'Add dir="..." 
to each element.', + }); } } return { provarHome, projectPath, resultsPath, webBrowser, testEnvironment, filesetCount: filesets.length }; @@ -734,8 +763,7 @@ export function validateAntXml(xmlContent: string): AntValidationResult { ignoreAttributes: false, attributeNamePrefix: '@_', parseAttributeValue: false, - isArray: (tagName) => - ['taskdef', 'target', 'fileset', 'include', 'planFeature'].includes(tagName), + isArray: (tagName) => ['taskdef', 'target', 'fileset', 'include', 'planFeature'].includes(tagName), }); let parsed: Record; try { @@ -797,8 +825,10 @@ export function validateAntXml(xmlContent: string): AntValidationResult { return finalizeAnt(issues, null, null, null, null, null, 0); } - const { provarHome, projectPath, resultsPath, webBrowser, testEnvironment, filesetCount } = - validateRunTestCase(rtc, issues); + const { provarHome, projectPath, resultsPath, webBrowser, testEnvironment, filesetCount } = validateRunTestCase( + rtc, + issues + ); return finalizeAnt(issues, provarHome, projectPath, resultsPath, webBrowser, testEnvironment, filesetCount); } @@ -831,6 +861,156 @@ function finalizeAnt( }; } +// ── JUnit XML step parsing ──────────────────────────────────────────────────── + +export interface JUnitStepResult { + testItemId: string; + title: string; + status: 'pass' | 'fail' | 'skip'; + errorMessage?: string; +} + +export interface JUnitParseResult { + steps: JUnitStepResult[]; + warning?: string; +} + +function extractFailureText(el: unknown): string | undefined { + if (!el) return undefined; + if (typeof el === 'string') return el.trim() || undefined; + if (typeof el === 'object') { + const obj = el as Record; + // Prefer CDATA body ('#text') — it has the specific error. Fall back to 'message' attribute. + const body = (obj['#text'] as string | undefined)?.trim(); + const msg = (obj['message'] as string | undefined)?.trim(); + if (body && msg && body !== msg) return `${msg}: ${body}`; + return body ?? 
msg; + } + return undefined; +} + +function extractStepsFromJUnit(parsed: Record): JUnitStepResult[] { + const steps: JUnitStepResult[] = []; + let idx = 0; + + // Normalise to array of suites — handles both and bare + let suites: Array> = []; + if (parsed['testsuites']) { + const inner = (parsed['testsuites'] as Record)['testsuite']; + suites = Array.isArray(inner) + ? (inner as Array>) + : inner + ? [inner as Record] + : []; + } else if (parsed['testsuite']) { + const ts = parsed['testsuite']; + suites = Array.isArray(ts) ? (ts as Array>) : [ts as Record]; + } + + for (const suite of suites) { + const rawTc = suite['testcase']; + if (!rawTc) continue; + const testcases: Array> = Array.isArray(rawTc) + ? (rawTc as Array>) + : [rawTc as Record]; + + for (const tc of testcases) { + idx++; + // Provar JUnit: name = test case file name (no attribute prefix since attributeNamePrefix: '') + const title = (tc['name'] as string | undefined) ?? `Test ${idx}`; + const hasFailure = 'failure' in tc || 'error' in tc; + const hasSkipped = 'skipped' in tc; + + let status: 'pass' | 'fail' | 'skip' = 'pass'; + if (hasFailure) status = 'fail'; + else if (hasSkipped) status = 'skip'; + + const errorMessage = extractFailureText(tc['failure'] ?? tc['error']); + const step: JUnitStepResult = { testItemId: String(idx), title, status }; + if (errorMessage) step.errorMessage = errorMessage; + steps.push(step); + } + } + + return steps; +} + +function findXmlFiles(dir: string): string[] { + const results: string[] = []; + try { + for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { + if (entry.isFile() && entry.name.endsWith('.xml') && !entry.name.startsWith('.')) { + results.push(path.join(dir, entry.name)); + } + } + } catch { + // ignore unreadable dirs + } + return results; +} + +/** + * Scan a Provar results directory for JUnit XML files and return structured step results. + * Returns an empty steps array (+ optional warning) when no XML is found or parsing fails. 
+ */ +export function parseJUnitResults(resultsDir: string): JUnitParseResult { + if (!fs.existsSync(resultsDir)) { + return { steps: [], warning: `Results directory not found: ${resultsDir}` }; + } + + const xmlFiles = findXmlFiles(resultsDir); + if (xmlFiles.length === 0) { + return { + steps: [], + warning: 'No JUnit XML files found in results directory — structured step output unavailable.', + }; + } + + const parser = new XMLParser({ + ignoreAttributes: false, + attributeNamePrefix: '', + textNodeName: '#text', + allowBooleanAttributes: true, + parseAttributeValue: false, + isArray: (tagName) => ['testsuite', 'testcase'].includes(tagName), + }); + + const allSteps: JUnitStepResult[] = []; + let parsedAny = false; + let parseFailures = 0; + + for (const xmlFile of xmlFiles) { + try { + const content = fs.readFileSync(xmlFile, 'utf-8'); + const parsed = parser.parse(content) as Record; + const steps = extractStepsFromJUnit(parsed); + allSteps.push(...steps); + parsedAny = true; + } catch { + parseFailures++; + } + } + + if (!parsedAny) { + return { + steps: [], + warning: 'JUnit XML files found but could not be parsed — structured step output unavailable.', + }; + } + if (allSteps.length === 0) { + return { + steps: [], + warning: 'JUnit XML found but no test steps could be extracted — files may not be standard JUnit format.', + }; + } + + const warning = + parseFailures > 0 + ? 
`${parseFailures} JUnit XML file(s) could not be parsed — step data may be incomplete.` + : undefined; + return { steps: allSteps, warning }; +} + // ── Registration ────────────────────────────────────────────────────────────── export function registerAllAntTools(server: McpServer, config: ServerConfig): void { diff --git a/src/mcp/tools/automationTools.ts b/src/mcp/tools/automationTools.ts index 703feed..42b4038 100644 --- a/src/mcp/tools/automationTools.ts +++ b/src/mcp/tools/automationTools.ts @@ -15,6 +15,7 @@ import { makeError, makeRequestId } from '../schemas/common.js'; import { log } from '../logging/logger.js'; import type { ServerConfig } from '../server.js'; import { assertPathAllowed, PathPolicyError } from '../security/pathPolicy.js'; +import { parseJUnitResults } from './antTools.js'; import { sfSpawnHelper, SfNotFoundError } from './sfSpawn.js'; // ── SF CLI discovery ────────────────────────────────────────────────────────── @@ -108,7 +109,7 @@ function resolveSfExecutable(): string | null { // missing — it exits non-zero with "not recognised" in stderr but sets no // probe.error. Trying shell:false first catches both cases correctly. // - // Phase 1: shell:false (works on Linux/macOS; gives ENOENT on Windows if + // First attempt: shell:false (works on Linux/macOS; gives ENOENT on Windows if // sf.cmd is on PATH but requires the shell). const probe = sfSpawnHelper.spawnSync('sf', ['--version'], { encoding: 'utf-8', @@ -120,7 +121,7 @@ function resolveSfExecutable(): string | null { return cachedSfPath; } - // Phase 2 (Windows only): retry with shell:true when the plain probe failed + // Windows fallback: retry with shell:true when the plain probe failed // with ENOENT — meaning sf.cmd exists on PATH but can't run without the shell. 
if (platform === 'win32' && (probe.error as NodeJS.ErrnoException | undefined)?.code === 'ENOENT') { const probeShell = sfSpawnHelper.spawnSync('sf', ['--version'], { @@ -321,9 +322,71 @@ export function filterTestRunOutput(raw: string): { filtered: string; suppressed return { filtered, suppressed }; } +// ── JUnit results enrichment ────────────────────────────────────────────────── + +// Overrideable in tests — bypasses the sf config file read +let sfResultsPathOverride: string | null | undefined; + +/** Exposed for testing only — set the results path returned by the sf config reader. Pass undefined to reset. */ +export function setSfResultsPathForTesting(p: string | null | undefined): void { + sfResultsPathOverride = p; +} + +/** + * Resolves the actual results directory for the latest run. + * Provar's Increment disposition creates Results(1), Results(2)… as siblings of Results/. + * Returns the latest sibling dir, or the base path if no siblings exist. + */ +function resolveLatestResultsDir(resultsBase: string): string { + const parent = path.dirname(resultsBase); + const base = path.basename(resultsBase); + try { + const safeName = base.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp(`^${safeName}\\((\\d+)\\)$`); + const indices = fs + .readdirSync(parent, { withFileTypes: true }) + .filter((e) => e.isDirectory() && pattern.test(e.name)) + .map((e) => parseInt((pattern.exec(e.name) as RegExpExecArray)[1], 10)); + if (indices.length > 0) { + const maxIdx = indices.reduce((a, b) => (a > b ? a : b), 0); + return path.join(parent, `${base}(${maxIdx})`); + } + } catch { + // ignore filesystem errors + } + return resultsBase; +} + +/** + * Reads resultsPath from the currently active provardx-properties.json (via ~/.sf/config.json), + * then resolves to the latest Increment-mode sibling directory. + * Returns null when the sf config or properties file cannot be read or is outside allowed paths. 
+ */ +function readResultsPathFromSfConfig(config: ServerConfig): string | null { + if (sfResultsPathOverride !== undefined) return sfResultsPathOverride; + try { + const sfConfigPath = path.join(os.homedir(), '.sf', 'config.json'); + if (!fs.existsSync(sfConfigPath)) return null; + const sfConfig = JSON.parse(fs.readFileSync(sfConfigPath, 'utf-8')) as Record; + const propFilePath = sfConfig['PROVARDX_PROPERTIES_FILE_PATH'] as string | undefined; + if (!propFilePath || !fs.existsSync(propFilePath)) return null; + // Guard: only read the properties file if it is within the session's allowed paths. + assertPathAllowed(propFilePath, config.allowedPaths); + const props = JSON.parse(fs.readFileSync(propFilePath, 'utf-8')) as Record; + const resultsBase = props['resultsPath'] as string | undefined; + if (!resultsBase) return null; + const resultsDir = resolveLatestResultsDir(resultsBase); + // Guard: only read the results directory if it is within the session's allowed paths. + assertPathAllowed(resultsDir, config.allowedPaths); + return resultsDir; + } catch { + return null; + } +} + // ── Tool: provar.automation.testrun ─────────────────────────────────────────── -export function registerAutomationTestRun(server: McpServer): void { +export function registerAutomationTestRun(server: McpServer, config: ServerConfig): void { server.tool( 'provar.automation.testrun', [ @@ -353,14 +416,28 @@ export function registerAutomationTestRun(server: McpServer): void { const result = runSfCommand(['provar', 'automation', 'test', 'run', ...flags], sf_path); const { filtered, suppressed } = filterTestRunOutput(result.stdout); + // Attempt to enrich the response with structured step data from JUnit XML + const resultsPath = readResultsPathFromSfConfig(config); + const { steps, warning: junitWarning } = resultsPath + ? 
parseJUnitResults(resultsPath) + : { steps: [], warning: undefined }; + if (result.exitCode !== 0) { const { filtered: filteredErr, suppressed: suppressedErr } = filterTestRunOutput( result.stderr || result.stdout ); - const errBody = { + const errBody: Record = { ...makeError('AUTOMATION_TESTRUN_FAILED', filteredErr, requestId), ...(suppressedErr > 0 ? { output_lines_suppressed: suppressedErr } : {}), }; + if (steps.length > 0) errBody['steps'] = steps; + if (!resultsPath || junitWarning) { + errBody['details'] = { + warning: + junitWarning ?? + 'Could not locate results directory — step-level output unavailable. Run provar.automation.config.load first.', + }; + } return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(errBody) }] }; } @@ -371,6 +448,8 @@ export function registerAutomationTestRun(server: McpServer): void { stderr: result.stderr, }; if (suppressed > 0) response['output_lines_suppressed'] = suppressed; + if (steps.length > 0) response['steps'] = steps; + if (junitWarning) response['details'] = { warning: junitWarning }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { return handleSpawnError(err, requestId, 'provar.automation.testrun'); @@ -690,7 +769,7 @@ export function registerAutomationSetup(server: McpServer): void { export function registerAllAutomationTools(server: McpServer, config: ServerConfig): void { registerAutomationSetup(server); registerAutomationConfigLoad(server, config); - registerAutomationTestRun(server); + registerAutomationTestRun(server, config); registerAutomationCompile(server); registerAutomationMetadataDownload(server); } diff --git a/src/mcp/tools/bestPracticesEngine.ts b/src/mcp/tools/bestPracticesEngine.ts index ecee7fb..bcca9ca 100644 --- a/src/mcp/tools/bestPracticesEngine.ts +++ b/src/mcp/tools/bestPracticesEngine.ts @@ -83,10 +83,7 @@ let rulesConfig: BPRulesConfig | null = null; function getRulesConfig(): 
BPRulesConfig { if (!rulesConfig) { try { - const raw = readFileSync( - join(dirPath, '..', 'rules', 'provar_best_practices_rules.json'), - 'utf-8' - ); + const raw = readFileSync(join(dirPath, '..', 'rules', 'provar_best_practices_rules.json'), 'utf-8'); rulesConfig = JSON.parse(raw) as BPRulesConfig; } catch { rulesConfig = { schemaVersion: '1.0', rules: [], scoring: { defaultMaxScore: 100 } }; @@ -224,9 +221,7 @@ function getAllApiCalls(node: XmlNode): XmlNode[] { function getDirectSteps(tc: XmlNode): XmlNode[] { const steps = tc['steps'] as XmlNode | undefined; if (!steps || typeof steps !== 'object') return []; - return toArr(steps['apiCall'] as XmlNode | XmlNode[]).filter( - (c) => c && typeof c === 'object' - ); + return toArr(steps['apiCall'] as XmlNode | XmlNode[]).filter((c) => c && typeof c === 'object'); } /** @@ -249,7 +244,7 @@ function getArgValue(call: XmlNode, argId: string): string | undefined { const partsNode = (valElem['parts'] as XmlNode | undefined)?.['value']; const texts = toArr(partsNode as XmlNode | XmlNode[]) .filter((p) => p && typeof p === 'object') - .map((p) => (p)['#text'] as string | undefined) + .map((p) => p['#text'] as string | undefined) .filter(Boolean) as string[]; return texts.join('') || undefined; } @@ -260,9 +255,7 @@ function getArgValue(call: XmlNode, argId: string): string | undefined { /** Return all elements from an apiCall. */ function getArguments(call: XmlNode): XmlNode[] { - return toArr(call['argument'] as XmlNode | XmlNode[]).filter( - (a) => a && typeof a === 'object' - ); + return toArr(call['argument'] as XmlNode | XmlNode[]).filter((a) => a && typeof a === 'object'); } /** Return true if the apiCall has a disabled. 
*/ @@ -276,7 +269,10 @@ function isDisabledCall(call: XmlNode): boolean { // ── Scoring formula (exact Lambda match) ───────────────────────────────────── const SEVERITY_MULT: Record = { - critical: 1.0, major: 0.75, minor: 0.5, info: 0.25, + critical: 1.0, + major: 0.75, + minor: 0.5, + info: 0.25, }; export function calculateBPScore(violations: BPViolation[]): number { @@ -328,9 +324,7 @@ function validateUnknownApiId(tc: XmlNode, rule: BPRule): BPViolation | null { if (!unknowns.length) return null; const MAX = 5; - const msgs = unknowns.slice(0, 3).map( - (v) => `'${v.apiId}'${v.tid ? ` (testItemId=${v.tid})` : ''}` - ); + const msgs = unknowns.slice(0, 3).map((v) => `'${v.apiId}'${v.tid ? ` (testItemId=${v.tid})` : ''}`); let message = `Unknown apiId(s) detected — these APIs do not exist in Provar: ${msgs.join('; ')}`; if (unknowns.length > 3) message += ` (+${unknowns.length - 3} more)`; return makeViolation(rule, message, Math.min(unknowns.length, MAX)); @@ -339,8 +333,8 @@ function validateUnknownApiId(tc: XmlNode, rule: BPRule): BPViolation | null { /** VALID-GUID-001 — test case must have at least one valid identifier. 
*/ function validateValidIdentifier(tc: XmlNode, rule: BPRule): BPViolation | null { const guid = tc['@_guid'] as string | undefined; - const id = tc['@_id'] as string | undefined; - const rid = tc['@_registryId'] as string | undefined; + const id = tc['@_id'] as string | undefined; + const rid = tc['@_registryId'] as string | undefined; if (!guid && !id && !rid) { return makeViolation(rule, 'Test case missing valid identifier (guid, id, or registryId)'); } @@ -386,11 +380,7 @@ function validateWholeNumberTestItemId(tc: XmlNode, rule: BPRule): BPViolation | } if (!invalid.length) return null; - return makeViolation( - rule, - `Invalid testItemId values: ${invalid.slice(0, 3).join(', ')}`, - invalid.length - ); + return makeViolation(rule, `Invalid testItemId values: ${invalid.slice(0, 3).join(', ')}`, invalid.length); } /** VAR-NAMING-001 — variable names must match the identifier pattern. */ @@ -421,7 +411,7 @@ function validateVariableNaming(tc: XmlNode, rule: BPRule): BPViolation | null { if (nvContainer && typeof nvContainer === 'object') { for (const nv of toArr(nvContainer['namedValue'] as XmlNode | XmlNode[])) { if (!nv || typeof nv !== 'object') continue; - const name = (nv)['@_name'] as string | undefined; + const name = nv['@_name'] as string | undefined; if (name && !['valuePath', 'value', 'valueScope'].includes(name)) { checkName(name, 'SetValues variable'); } @@ -440,7 +430,7 @@ function validateVariableNaming(tc: XmlNode, rule: BPRule): BPViolation | null { /** STRUCT-GROUP-001 — direct steps should be wrapped in grouping elements. */ function validateMustAllBeInGroups(tc: XmlNode, rule: BPRule): BPViolation | null { const check = rule.check; - const acceptBdd = (check['acceptBddSteps'] as boolean | undefined) ?? true; + const acceptBdd = (check['acceptBddSteps'] as boolean | undefined) ?? true; const acceptFinally = (check['acceptFinallyBlocks'] as boolean | undefined) ?? 
true; const GROUPING_IDS = new Set(['com.provar.plugins.bundled.apis.control.StepGroup']); @@ -451,7 +441,8 @@ function validateMustAllBeInGroups(tc: XmlNode, rule: BPRule): BPViolation | nul 'com.provar.plugins.bundled.apis.bdd.Then', 'com.provar.plugins.bundled.apis.bdd.And', 'com.provar.plugins.bundled.apis.bdd.But', - ]) GROUPING_IDS.add(id); + ]) + GROUPING_IDS.add(id); } if (acceptFinally) GROUPING_IDS.add('com.provar.plugins.bundled.apis.control.Finally'); @@ -467,8 +458,9 @@ function validateMustAllBeInGroups(tc: XmlNode, rule: BPRule): BPViolation | nul if (!GROUPING_IDS.has(apiId)) { const title = (step['@_title'] as string | undefined) ?? - (step['@_name'] as string | undefined) ?? - apiId.split('.').pop() ?? ''; + (step['@_name'] as string | undefined) ?? + apiId.split('.').pop() ?? + ''; ungrouped.push(title.substring(0, 30)); } } @@ -477,10 +469,7 @@ function validateMustAllBeInGroups(tc: XmlNode, rule: BPRule): BPViolation | nul if (directSteps.length > 0 && apiCallCount / directSteps.length >= 0.8) return null; if (ungrouped.length > 3) { - return makeViolation( - rule, - `${ungrouped.length} steps not in groups: ${ungrouped.slice(0, 3).join(', ')}...` - ); + return makeViolation(rule, `${ungrouped.length} steps not in groups: ${ungrouped.slice(0, 3).join(', ')}...`); } return null; } @@ -489,36 +478,82 @@ function validateMustAllBeInGroups(tc: XmlNode, rule: BPRule): BPViolation | nul function validateDisabledStep(tc: XmlNode, rule: BPRule): BPViolation | null { for (const call of getAllApiCalls(tc)) { if (!isDisabledCall(call)) continue; - const tid = call['@_testItemId'] as string | undefined; - const apiId = (call['@_apiId'] as string | undefined) ?? 'unknown'; - const name = - (call['@_name'] as string | undefined) ?? + const tid = call['@_testItemId'] as string | undefined; + const apiId = (call['@_apiId'] as string | undefined) ?? 'unknown'; + const name = + (call['@_name'] as string | undefined) ?? (call['@_title'] as string | undefined) ?? 
- apiId.split('.').pop() ?? 'unnamed'; - return makeViolation( - rule, - `Disabled step found: "${name}"${tid ? ` (testItemId=${tid})` : ''}` - ); + apiId.split('.').pop() ?? + 'unnamed'; + return makeViolation(rule, `Disabled step found: "${name}"${tid ? ` (testItemId=${tid})` : ''}`); } return null; } // Arguments that should NOT be checked for duplicate literals const LITERAL_EXCLUDE_ARGS = new Set([ - 'uiConnectionName', 'apexConnectionName', 'resultScope', 'resultName', - 'sfUiTargetResultScope', 'sfUiTargetResultName', 'objectType', 'connectionId', - 'connectionName', 'comparisonType', 'assertionType', 'httpMethod', 'controlType', - 'loopType', 'navigate', 'windowSelection', 'windowSize', 'alreadyOpenBehaviour', - 'captureBefore', 'captureAfter', 'interactionDescription', 'targetDescription', 'title', + 'uiConnectionName', + 'apexConnectionName', + 'resultScope', + 'resultName', + 'sfUiTargetResultScope', + 'sfUiTargetResultName', + 'objectType', + 'connectionId', + 'connectionName', + 'comparisonType', + 'assertionType', + 'httpMethod', + 'controlType', + 'loopType', + 'navigate', + 'windowSelection', + 'windowSize', + 'alreadyOpenBehaviour', + 'captureBefore', + 'captureAfter', + 'interactionDescription', + 'targetDescription', + 'title', ]); const METADATA_FIELDS = new Set([ - 'status', 'stage', 'recordtype', 'recordtypeid', 'type', 'subtype', 'category', - 'priority', 'source', 'origin', 'reason', 'result', 'state', 'severity', - 'casestatus', 'leadstatus', 'opportunitystage', 'accounttype', 'industry', + 'status', + 'stage', + 'recordtype', + 'recordtypeid', + 'type', + 'subtype', + 'category', + 'priority', + 'source', + 'origin', + 'reason', + 'result', + 'state', + 'severity', + 'casestatus', + 'leadstatus', + 'opportunitystage', + 'accounttype', + 'industry', ]); const DEFAULT_LITERAL_VALUES = new Set([ - '0', '1', '0.0', '1.0', '', 'N/A', 'n/a', 'NA', 'None', 'Default', 'default', - 'Test', 'Global', 'Local', 'Folder', 'GroupStep', + '0', + '1', + 
'0.0', + '1.0', + '', + 'N/A', + 'n/a', + 'NA', + 'None', + 'Default', + 'default', + 'Test', + 'Global', + 'Local', + 'Folder', + 'GroupStep', ]); /** DDT-VAR-001 — detect hardcoded literal values repeated ≥2 times. */ @@ -563,8 +598,8 @@ function validateDetectDuplicatesLiterals(tc: XmlNode, rule: BPRule): BPViolatio /** NC-FOLDER-001 / NC-PARAM-001 — regex check on target value. */ function validateRegex(tc: XmlNode, rule: BPRule, metadata: BPMetadata): BPViolation | null { - const check = rule.check; - const target = (check['target'] as string | undefined) ?? ''; + const check = rule.check; + const target = (check['target'] as string | undefined) ?? ''; const pattern = (check['pattern'] as string | undefined) ?? ''; if (!pattern) return null; @@ -580,7 +615,10 @@ function validateRegex(tc: XmlNode, rule: BPRule, metadata: BPMetadata): BPViola outer: for (const call of getAllApiCalls(tc)) { for (const arg of getArguments(call)) { const argId = arg['@_id'] as string | undefined; - if (argId) { value = argId; break outer; } + if (argId) { + value = argId; + break outer; + } } } } @@ -599,17 +637,33 @@ function validateRegex(tc: XmlNode, rule: BPRule, metadata: BPMetadata): BPViola // Steps that WRITE to resultName (not just read) const RESULT_WRITE_STEPS = new Set([ - 'ApexConnect', 'UiConnect', 'DbConnect', 'WebConnect', - 'ApexCreateObject', 'ApexUpdateObject', 'ApexReadObject', - 'ApexSoqlQuery', 'ApexSoqlBuilder', - 'RestRequest', 'SoapRequest', - 'SqlQuery', 'DbInsert', 'DbRead', 'DbUpdate', - 'UiWithScreen', 'UiWithRow', + 'ApexConnect', + 'UiConnect', + 'DbConnect', + 'WebConnect', + 'ApexCreateObject', + 'ApexUpdateObject', + 'ApexReadObject', + 'ApexSoqlQuery', + 'ApexSoqlBuilder', + 'RestRequest', + 'SoapRequest', + 'SqlQuery', + 'DbInsert', + 'DbRead', + 'DbUpdate', + 'UiWithScreen', + 'UiWithRow', ]); // Patterns to exclude from result-name tracking const RESULT_EXCLUDE_PATTERNS = [ - 'StepGroup', 'control.If', 'control.While', 'control.ForEach', - 
'control.Try', 'control.Finally', 'control.SetValues', + 'StepGroup', + 'control.If', + 'control.While', + 'control.ForEach', + 'control.Try', + 'control.Finally', + 'control.SetValues', ]; /** APEX-RESULTNAME-001 — detect duplicate resultNames within the same scope. */ @@ -640,9 +694,7 @@ function validateUniqueResultNames(tc: XmlNode, rule: BPRule): BPViolation | nul for (const [scope, names] of byScope) { for (const [name, steps] of names) { if (steps.length > 1) { - violations.push( - `'${name}' in ${scope} scope: written ${steps.length} times (${steps.slice(0, 2).join(', ')})` - ); + violations.push(`'${name}' in ${scope} scope: written ${steps.length} times (${steps.slice(0, 2).join(', ')})`); } } } @@ -655,21 +707,87 @@ function validateUniqueResultNames(tc: XmlNode, rule: BPRule): BPViolation | nul return makeViolation(rule, msg, Math.min(violations.length, MAX)); } +// ── UiWithScreen target URI validator ───────────────────────────────────────── + +const VALID_SF_UI_PARAMS = new Set([ + 'object', + 'action', + 'lightningComponent', + 'lightningWebComponent', + 'auraComponent', + 'application', + 'lookup', + 'fieldService', + 'tab', +]); + +/** Read the "target" argument from a UiWithScreen call via the wrapper. */ +function getUiWithScreenTarget(call: XmlNode): string | undefined { + const argsNode = call['arguments'] as XmlNode | undefined; + if (!argsNode || typeof argsNode !== 'object') return undefined; + for (const arg of toArr(argsNode['argument'] as XmlNode | XmlNode[])) { + if (!arg || typeof arg !== 'object') continue; + if (arg['@_id'] !== 'target') continue; + const valElem = arg['value'] as XmlNode | undefined; + if (!valElem || typeof valElem !== 'object') return undefined; + return (valElem['#text'] as string | undefined) ?? undefined; + } + return undefined; +} + +/** UI-SCREEN-TARGET — validate UiWithScreen target URIs (SF and page object formats). 
*/ +function validateUiWithScreenTarget(tc: XmlNode, rule: BPRule): BPViolation | null { + const targetApiId = rule.check['apiId'] as string | undefined; + const violations: string[] = []; + + for (const call of getAllApiCalls(tc)) { + const apiId = call['@_apiId'] as string | undefined; + if (!apiId) continue; + if (targetApiId && !apiId.includes(targetApiId)) continue; + + const target = getUiWithScreenTarget(call); + if (!target) continue; + + if (target.startsWith('sf:')) { + const qs = target.includes('?') ? target.split('?')[1] : ''; + const hasValidParam = [...new URLSearchParams(qs).keys()].some((k) => VALID_SF_UI_PARAMS.has(k)); + if (!hasValidParam) { + violations.push(`UiWithScreen target "${target}" has no recognised SF parameters`); + } + } else if (target.startsWith('ui:pageobject:target')) { + if (!target.includes('?')) { + violations.push(`UiWithScreen target "${target}" uses colon format — use ?pageId=pageobjects.`); + } else { + const pageId = new URLSearchParams(target.split('?')[1]).get('pageId'); + if (!pageId?.startsWith('pageobjects.')) { + violations.push(`UiWithScreen target "${target}" pageId must start with "pageobjects."`); + } + } + } + } + + if (!violations.length) return null; + let msg = violations.slice(0, 2).join('; '); + if (violations.length > 2) msg += ` (and ${violations.length - 2} more)`; + return makeViolation(rule, msg, Math.min(violations.length, 3)); +} + // ── Validator dispatch map ──────────────────────────────────────────────────── type ValidatorFn = (tc: XmlNode, rule: BPRule) => BPViolation | null; const VALIDATOR_REGISTRY: Record = { - unknownApiId: validateUnknownApiId, - validIdentifier: validateValidIdentifier, - mustExist: validateMustExist, - mustExistAndNotEmpty: validateMustExistAndNotEmpty, - wholeNumberTestItemId: validateWholeNumberTestItemId, - variableNaming: validateVariableNaming, - mustAllBeInGroups: validateMustAllBeInGroups, - disabledStep: validateDisabledStep, + unknownApiId: 
validateUnknownApiId, + validIdentifier: validateValidIdentifier, + mustExist: validateMustExist, + mustExistAndNotEmpty: validateMustExistAndNotEmpty, + wholeNumberTestItemId: validateWholeNumberTestItemId, + variableNaming: validateVariableNaming, + mustAllBeInGroups: validateMustAllBeInGroups, + disabledStep: validateDisabledStep, detectDuplicatesLiterals: validateDetectDuplicatesLiterals, - uniqueResultNames: validateUniqueResultNames, + uniqueResultNames: validateUniqueResultNames, + uiWithScreenTarget: validateUiWithScreenTarget, // 'regex' is dispatched separately (needs metadata) }; @@ -690,10 +808,7 @@ const XML_PARSER = new XMLParser({ * @param metadata Optional context (testName used for regex rules). * @returns Quality score (0–100) + list of violations. */ -export function runBestPractices( - xmlContent: string, - metadata: BPMetadata = {} -): BPEngineResult { +export function runBestPractices(xmlContent: string, metadata: BPMetadata = {}): BPEngineResult { let parsed: XmlNode; try { parsed = XML_PARSER.parse(xmlContent) as XmlNode; diff --git a/src/mcp/tools/connectionTools.ts b/src/mcp/tools/connectionTools.ts new file mode 100644 index 0000000..c500e75 --- /dev/null +++ b/src/mcp/tools/connectionTools.ts @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2024 Provar Limited. + * All rights reserved. + * Licensed under the BSD 3-Clause license. 
+ * For full license text, see LICENSE.md file in the repo root or https://opensource.org/licenses/BSD-3-Clause + */ + +/* eslint-disable camelcase */ +import fs from 'node:fs'; +import path from 'node:path'; +import { XMLParser } from 'fast-xml-parser'; +import { z } from 'zod'; +import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; +import type { ServerConfig } from '../server.js'; +import { assertPathAllowed, PathPolicyError } from '../security/pathPolicy.js'; +import { makeError, makeRequestId } from '../schemas/common.js'; +import { log } from '../logging/logger.js'; + +// ── Types ───────────────────────────────────────────────────────────────────── + +interface ConnectionEntry { + name: string; + type: string; + url?: string; + sso_configured: boolean; +} + +interface EnvironmentEntry { + name: string; + connection: string; + url?: string; +} + +// ── Class → human-readable type mapping ────────────────────────────────────── + +const CLASS_TO_TYPE: Record = { + sf: 'Salesforce', + ui: 'Web', + testmanager: 'Quality Hub', + webservice: 'Web Service', + database: 'Database', + google: 'Google', + msexc: 'Microsoft', + zephyr: 'Zephyr', + zephyrScale: 'Zephyr', + zephyrServer: 'Zephyr', + sso: 'SSO', +}; + +function classToType(className: string): string { + return CLASS_TO_TYPE[className] ?? 
className; +} + +// ── .testproject parsers ────────────────────────────────────────────────────── + +const TP_PARSER = new XMLParser({ + ignoreAttributes: false, + attributeNamePrefix: '@_', + parseAttributeValue: false, + isArray: (name): boolean => name === 'connectionClass' || name === 'connection' || name === 'environment', +}); + +class XmlParseError extends Error { + public code = 'CONNECTION_XML_PARSE_ERROR'; +} + +function parseTestProjectXml(content: string): Record { + let parsed: Record; + try { + parsed = TP_PARSER.parse(content) as Record; + } catch (e) { + throw new XmlParseError(`Failed to parse .testproject XML: ${(e as Error).message}`); + } + const raw = parsed['testProject']; + return raw !== null && typeof raw === 'object' ? (raw as Record) : {}; +} + +function parseConnectionList(content: string): ConnectionEntry[] { + const tp = parseTestProjectXml(content); + const cc = tp['connectionClasses']; + if (!cc || typeof cc !== 'object') return []; + + const classesRaw = (cc as Record)['connectionClass']; + if (!classesRaw) return []; + const classes = classesRaw as Array>; + + const connections: ConnectionEntry[] = []; + for (const cls of classes) { + const className = cls['@_name'] as string | undefined; + if (!className) continue; + const connsRaw = cls['connection']; + if (!connsRaw) continue; + for (const conn of connsRaw as Array>) { + const connName = conn['@_name'] as string | undefined; + if (!connName) continue; + const url = conn['@_url'] as string | undefined; + connections.push({ + name: connName, + type: classToType(className), + ...(url ? 
{ url } : {}), + sso_configured: className === 'sso', + }); + } + } + return connections; +} + +function parseEnvironmentList(content: string): EnvironmentEntry[] { + const tp = parseTestProjectXml(content); + const envSection = tp['environments']; + if (!envSection || typeof envSection !== 'object') return []; + + const envsRaw = (envSection as Record)['environment']; + if (!envsRaw) return []; + + const environments: EnvironmentEntry[] = []; + for (const env of envsRaw as Array>) { + const name = env['@_name'] as string | undefined; + if (!name) continue; + const connection = env['@_connectionName'] as string | undefined; + const url = env['@_url'] as string | undefined; + environments.push({ + name, + connection: connection ?? '', + ...(url ? { url } : {}), + }); + } + return environments; +} + +// ── Tool registration ───────────────────────────────────────────────────────── + +export function registerConnectionList(server: McpServer, config: ServerConfig): void { + server.tool( + 'provar.connection.list', + [ + 'List all connections and named environments defined in the .testproject file.', + 'Use this before generating test cases or page objects to get the correct connection names.', + 'Returns connections (name, type, url, sso_configured) and environments (name, connection, url).', + 'Prerequisite: the project must have a .testproject file — run provar.project.validate first if unsure.', + 'Security: only connection names, types, and URLs are returned — credential values from .secrets are never included.', + ].join(' '), + { + project_path: z + .string() + .describe('Absolute or relative path to the Provar project root directory (must be within --allowed-paths)'), + }, + ({ project_path }) => { + const requestId = makeRequestId(); + log('info', 'provar.connection.list', { requestId, project_path }); + + try { + const resolvedPath = path.resolve(project_path); + assertPathAllowed(resolvedPath, config.allowedPaths); + + const testProjectPath = 
path.join(resolvedPath, '.testproject'); + if (!fs.existsSync(testProjectPath)) { + const err = makeError( + 'CONNECTION_FILE_NOT_FOUND', + `No .testproject file found at: ${testProjectPath}. Run provar.project.validate first to confirm the project structure.`, + requestId, + false, + { suggestion: 'Run provar.project.validate with the project_path to confirm the project root, then retry.' } + ); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + + assertPathAllowed(testProjectPath, config.allowedPaths); + + let content: string; + try { + content = fs.readFileSync(testProjectPath, 'utf-8'); + } catch (readErr) { + const err = makeError( + 'CONNECTION_FILE_READ_ERROR', + `Failed to read .testproject: ${(readErr as Error).message}`, + requestId, + false + ); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + + const connections = parseConnectionList(content); + const environments = parseEnvironmentList(content); + + const result = { + requestId, + project_path: resolvedPath, + connections, + environments, + summary: { + connection_count: connections.length, + environment_count: environments.length, + }, + }; + return { + content: [{ type: 'text' as const, text: JSON.stringify(result) }], + structuredContent: result, + }; + } catch (err: unknown) { + const error = err as Error & { code?: string }; + const errResult = makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 
'CONNECTION_LIST_ERROR', + error.message, + requestId, + false + ); + log('error', 'provar.connection.list failed', { requestId, error: error.message }); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; + } + } + ); +} + +export function registerAllConnectionTools(server: McpServer, config: ServerConfig): void { + registerConnectionList(server, config); +} diff --git a/src/mcp/tools/pageObjectGenerate.ts b/src/mcp/tools/pageObjectGenerate.ts index e40f295..c1c1366 100644 --- a/src/mcp/tools/pageObjectGenerate.ts +++ b/src/mcp/tools/pageObjectGenerate.ts @@ -16,13 +16,28 @@ import { makeError, makeRequestId } from '../schemas/common.js'; import { log } from '../logging/logger.js'; const VALID_LOCATOR_STRATEGIES = [ - 'xpath', 'id', 'css', 'name', 'className', 'tagName', - 'linkText', 'partialLinkText', 'visualforce', 'label', + 'xpath', + 'id', + 'css', + 'name', + 'className', + 'tagName', + 'linkText', + 'partialLinkText', + 'visualforce', + 'label', ] as const; const VALID_ELEMENT_TYPES = [ - 'TextType', 'ButtonType', 'LinkType', 'ChoiceListType', - 'RadioType', 'FileType', 'DateType', 'RichTextType', 'BooleanType', + 'TextType', + 'ButtonType', + 'LinkType', + 'ChoiceListType', + 'RadioType', + 'FileType', + 'DateType', + 'RichTextType', + 'BooleanType', ] as const; const FieldSchema = z.object({ @@ -35,10 +50,66 @@ const FieldSchema = z.object({ element_type: z.enum(VALID_ELEMENT_TYPES).default('TextType'), }); +type ToolResult = { isError: true; content: Array<{ type: 'text'; text: string }> } | null; + +function preflightAndWrite( + filePath: string, + javaSource: string, + ssoFilePath: string | undefined, + ssoSource: string | undefined, + overwrite: boolean, + requestId: string +): ToolResult { + if (fs.existsSync(filePath) && !overwrite) { + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('FILE_EXISTS', `File already exists: ${filePath}. 
Set overwrite=true to replace.`, requestId) + ), + }, + ], + }; + } + if (ssoSource && ssoFilePath && fs.existsSync(ssoFilePath) && !overwrite) { + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'FILE_EXISTS', + `SSO stub file already exists: ${ssoFilePath}. Set overwrite=true to replace.`, + requestId + ) + ), + }, + ], + }; + } + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, javaSource, 'utf-8'); + log('info', 'provar.pageobject.generate: wrote file', { requestId, filePath }); + if (ssoSource && ssoFilePath) { + fs.writeFileSync(ssoFilePath, ssoSource, 'utf-8'); + log('info', 'provar.pageobject.generate: wrote SSO stub', { requestId, ssoFilePath }); + } + return null; +} + export function registerPageObjectGenerate(server: McpServer, config: ServerConfig): void { server.tool( 'provar.pageobject.generate', - 'Generate a Provar Java Page Object skeleton with @Page/@SalesforcePage annotation, standard imports, and @FindBy WebElement fields. Returns Java source. Writes to disk only when dry_run=false.', + [ + 'Generate a Provar Java Page Object skeleton with @Page/@SalesforcePage annotation, standard imports, and @FindBy WebElement fields.', + 'Returns Java source. Writes to disk only when dry_run=false.', + 'SSO support: set sso_class to also generate an ILoginPage implementation stub for non-SF SSO pages.', + 'Example: sso_class="LoginPageSso" generates a LoginPageSso.java that implements ILoginPage with loginAs() and logout() stubs.', + 'The ILoginPage stub is written to the same directory as output_path when dry_run=false.', + ].join(' '), { class_name: z.string().describe('PascalCase class name, e.g. 
AccountDetailPage'), package_name: z @@ -49,10 +120,7 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf .enum(['standard', 'salesforce']) .default('standard') .describe('@Page (standard) or @SalesforcePage (salesforce)'), - title: z - .string() - .optional() - .describe('Page title attribute; defaults to class_name if omitted'), + title: z.string().optional().describe('Page title attribute; defaults to class_name if omitted'), connection_name: z .string() .optional() @@ -62,22 +130,18 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf .optional() .describe('Page type attribute for @SalesforcePage'), fields: z.array(FieldSchema).default([]).describe('WebElement fields to generate'), - output_path: z + sso_class: z .string() .optional() - .describe('Suggested file path for the .java file (returned in response)'), - overwrite: z - .boolean() - .default(false) - .describe('Overwrite existing file when dry_run=false'), - dry_run: z - .boolean() - .default(true) - .describe('true = return source only (default); false = write to output_path'), - idempotency_key: z - .string() - .optional() - .describe('Caller-provided key echoed back for deduplication tracking'), + .describe( + 'PascalCase class name for an ILoginPage implementation stub (non-SF SSO pages). ' + + 'When provided, an additional Java class implementing ILoginPage is generated alongside the page object. ' + + 'Example: "LoginPageSso" → LoginPageSso.java with loginAs() and logout() method stubs.' 
+ ), + output_path: z.string().optional().describe('Suggested file path for the .java file (returned in response)'), + overwrite: z.boolean().default(false).describe('Overwrite existing file when dry_run=false'), + dry_run: z.boolean().default(true).describe('true = return source only (default); false = write to output_path'), + idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), }, (input) => { const requestId = makeRequestId(); @@ -85,34 +149,35 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf requestId, class_name: input.class_name, dry_run: input.dry_run, + sso_class: input.sso_class, }); try { const javaSource = buildPageObjectSource(input); - const filePath: string | undefined = input.output_path - ? path.resolve(input.output_path) - : undefined; + const filePath: string | undefined = input.output_path ? path.resolve(input.output_path) : undefined; let written = false; + const ssoSource = input.sso_class ? buildSsoLoginPageSource(input.sso_class, input.package_name) : undefined; + const ssoFilePath: string | undefined = + ssoSource && filePath ? path.join(path.dirname(filePath), `${input.sso_class}.java`) : undefined; + if (filePath && !input.dry_run) { assertPathAllowed(filePath, config.allowedPaths); + if (ssoFilePath) assertPathAllowed(ssoFilePath, config.allowedPaths); - if (fs.existsSync(filePath) && !input.overwrite) { - const err = makeError( - 'FILE_EXISTS', - `File already exists: ${filePath}. 
Set overwrite=true to replace.`, - requestId - ); - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; - } - - fs.mkdirSync(path.dirname(filePath), { recursive: true }); - fs.writeFileSync(filePath, javaSource, 'utf-8'); + const preflightErr = preflightAndWrite( + filePath, + javaSource, + ssoFilePath, + ssoSource, + input.overwrite, + requestId + ); + if (preflightErr) return preflightErr; written = true; - log('info', 'provar.pageobject.generate: wrote file', { requestId, filePath }); } - const result = { + const result: Record = { requestId, java_source: javaSource, file_path: filePath, @@ -120,6 +185,11 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf dry_run: input.dry_run, idempotency_key: input.idempotency_key, }; + if (ssoSource) { + result['sso_stub_source'] = ssoSource; + result['sso_stub_file_path'] = ssoFilePath; + result['sso_stub_written'] = written && !!ssoFilePath; + } return { content: [{ type: 'text' as const, text: JSON.stringify(result) }], structuredContent: result, @@ -127,7 +197,7 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf } catch (err: unknown) { const error = err as Error & { code?: string }; const errResult = makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'GENERATE_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 
'GENERATE_ERROR', error.message, requestId, false @@ -139,7 +209,7 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf ); } -// ── Source builder ──────────────────────────────────────────────────────────── +// ── Source builders ─────────────────────────────────────────────────────────── function buildPageObjectSource(input: { class_name: string; @@ -150,8 +220,7 @@ function buildPageObjectSource(input: { salesforce_page_attribute?: string; fields: Array>; }): string { - const { class_name, package_name, page_type, title, connection_name, salesforce_page_attribute, fields } = - input; + const { class_name, package_name, page_type, title, connection_name, salesforce_page_attribute, fields } = input; const pageTitle = title ?? class_name; let annotation: string; @@ -190,3 +259,24 @@ ${fieldBlocks} } `; } + +function buildSsoLoginPageSource(ssoClass: string, packageName: string): string { + return `package ${packageName}; + +import com.provar.core.testapi.annotations.sso.ILoginPage; + +public class ${ssoClass} implements ILoginPage { + + @Override + public void loginAs(String username, String password) { + // TODO: implement SSO login + } + + @Override + public void logout() { + // TODO: implement SSO logout + } + +} +`; +} diff --git a/src/mcp/tools/propertiesTools.ts b/src/mcp/tools/propertiesTools.ts index 7985481..7dd6cc2 100644 --- a/src/mcp/tools/propertiesTools.ts +++ b/src/mcp/tools/propertiesTools.ts @@ -7,6 +7,7 @@ /* eslint-disable camelcase */ import fs from 'node:fs'; +import os from 'node:os'; import path from 'node:path'; import { z } from 'zod'; import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; @@ -52,10 +53,19 @@ function validateProperties(props: Record): ValidationError[] { const meta = props['metadata'] as Record | undefined; if (meta && typeof meta === 'object') { for (const f of METADATA_REQUIRED) { - if (!meta[f]) errors.push({ field: `metadata.${f}`, message: `Required field "metadata.${f}" is 
missing`, severity: 'error' }); + if (!meta[f]) + errors.push({ + field: `metadata.${f}`, + message: `Required field "metadata.${f}" is missing`, + severity: 'error', + }); } if (meta['metadataLevel'] && !VALID_METADATA_LEVELS.includes(meta['metadataLevel'] as string)) { - errors.push({ field: 'metadata.metadataLevel', message: `metadata.metadataLevel must be one of: ${VALID_METADATA_LEVELS.join(', ')}`, severity: 'error' }); + errors.push({ + field: 'metadata.metadataLevel', + message: `metadata.metadataLevel must be one of: ${VALID_METADATA_LEVELS.join(', ')}`, + severity: 'error', + }); } } @@ -63,29 +73,63 @@ function validateProperties(props: Record): ValidationError[] { const env = props['environment'] as Record | undefined; if (env && typeof env === 'object') { for (const f of ENV_REQUIRED) { - if (!env[f]) errors.push({ field: `environment.${f}`, message: `Required field "environment.${f}" is missing`, severity: 'error' }); + if (!env[f]) + errors.push({ + field: `environment.${f}`, + message: `Required field "environment.${f}" is missing`, + severity: 'error', + }); } if (env['webBrowser'] && !VALID_BROWSERS.includes(env['webBrowser'] as string)) { - errors.push({ field: 'environment.webBrowser', message: `webBrowser must be one of: ${VALID_BROWSERS.join(', ')}`, severity: 'error' }); + errors.push({ + field: 'environment.webBrowser', + message: `webBrowser must be one of: ${VALID_BROWSERS.join(', ')}`, + severity: 'error', + }); } } // Optional enum fields - if (props['resultsPathDisposition'] && !VALID_RESULTS_DISPOSITION.includes(props['resultsPathDisposition'] as string)) { - errors.push({ field: 'resultsPathDisposition', message: `Must be one of: ${VALID_RESULTS_DISPOSITION.join(', ')}`, severity: 'error' }); + if ( + props['resultsPathDisposition'] && + !VALID_RESULTS_DISPOSITION.includes(props['resultsPathDisposition'] as string) + ) { + errors.push({ + field: 'resultsPathDisposition', + message: `Must be one of: ${VALID_RESULTS_DISPOSITION.join(', 
')}`, + severity: 'error', + }); } if (props['testOutputLevel'] && !VALID_OUTPUT_LEVELS.includes(props['testOutputLevel'] as string)) { - errors.push({ field: 'testOutputLevel', message: `Must be one of: ${VALID_OUTPUT_LEVELS.join(', ')}`, severity: 'error' }); + errors.push({ + field: 'testOutputLevel', + message: `Must be one of: ${VALID_OUTPUT_LEVELS.join(', ')}`, + severity: 'error', + }); } if (props['pluginOutputlevel'] && !VALID_PLUGIN_LEVELS.includes(props['pluginOutputlevel'] as string)) { - errors.push({ field: 'pluginOutputlevel', message: `Must be one of: ${VALID_PLUGIN_LEVELS.join(', ')}`, severity: 'error' }); + errors.push({ + field: 'pluginOutputlevel', + message: `Must be one of: ${VALID_PLUGIN_LEVELS.join(', ')}`, + severity: 'error', + }); } // Warn about placeholder values still in file - const placeholders = ['${PROVAR_HOME}', '${PROVAR_PROJECT_PATH}', '${PROVAR_RESULTS_PATH}', '${PROVAR_TEST_ENVIRONMENT}', '${PROVAR_TEST_PROJECT_SECRETS}']; + const placeholders = [ + '${PROVAR_HOME}', + '${PROVAR_PROJECT_PATH}', + '${PROVAR_RESULTS_PATH}', + '${PROVAR_TEST_ENVIRONMENT}', + '${PROVAR_TEST_PROJECT_SECRETS}', + ]; for (const [key, value] of Object.entries(props)) { if (typeof value === 'string' && placeholders.includes(value)) { - errors.push({ field: key, message: `Field "${key}" still contains placeholder value "${value}" — replace with actual path or use an environment variable`, severity: 'warning' }); + errors.push({ + field: key, + message: `Field "${key}" still contains placeholder value "${value}" — replace with actual path or use an environment variable`, + severity: 'warning', + }); } } @@ -96,8 +140,14 @@ function validateProperties(props: Record): ValidationError[] { function deepMerge(target: Record, source: Record): Record { const result = { ...target }; for (const [key, val] of Object.entries(source)) { - if (val !== null && typeof val === 'object' && !Array.isArray(val) && - result[key] !== null && typeof result[key] === 'object' 
&& !Array.isArray(result[key])) { + if ( + val !== null && + typeof val === 'object' && + !Array.isArray(val) && + result[key] !== null && + typeof result[key] === 'object' && + !Array.isArray(result[key]) + ) { result[key] = deepMerge(result[key] as Record, val as Record); } else { result[key] = val; @@ -122,7 +172,11 @@ export function registerPropertiesGenerate(server: McpServer, config: ServerConf project_path: z.string().optional().describe('Pre-fill the projectPath field with this value'), provar_home: z.string().optional().describe('Pre-fill the provarHome field with this value'), results_path: z.string().optional().describe('Pre-fill the resultsPath field with this value'), - overwrite: z.boolean().optional().default(false).describe('Overwrite the file if it already exists (default: false)'), + overwrite: z + .boolean() + .optional() + .default(false) + .describe('Overwrite the file if it already exists (default: false)'), dry_run: z.boolean().optional().default(false).describe('Return the content without writing (default: false)'), }, ({ output_path, project_path, provar_home, results_path, overwrite, dry_run }) => { @@ -134,15 +188,37 @@ export function registerPropertiesGenerate(server: McpServer, config: ServerConf const resolved = path.resolve(output_path); if (!overwrite && fs.existsSync(resolved)) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_EXISTS', `File already exists: ${resolved}. Set overwrite: true to replace it.`, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'FILE_EXISTS', + `File already exists: ${resolved}. 
Set overwrite: true to replace it.`, + requestId + ) + ), + }, + ], + }; } if (!resolved.endsWith('.json')) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('INVALID_PATH', 'output_path must end with .json', requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('INVALID_PATH', 'output_path must end with .json', requestId)), + }, + ], + }; } // Start from the template and apply any provided overrides - const content: Record = { ...propertyFileContent as unknown as Record }; + const content: Record = { ...(propertyFileContent as unknown as Record) }; if (project_path) content['projectPath'] = project_path; if (provar_home) content['provarHome'] = provar_home; if (results_path) content['resultsPath'] = results_path; @@ -158,19 +234,88 @@ export function registerPropertiesGenerate(server: McpServer, config: ServerConf ? 'Review the content, write to disk, then run provar.automation.config.load to register this file before compiling or running tests.' : `Run provar.automation.config.load with properties_path "${resolved}" to register this configuration. Required before provar.automation.compile or provar.automation.testrun will work.`; - const response = { requestId, file_path: resolved, written: !dry_run, dry_run: dry_run ?? false, content, next_steps: nextSteps }; + const response = { + requestId, + file_path: resolved, + written: !dry_run, + dry_run: dry_run ?? false, + content, + next_steps: nextSteps, + }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response, }; } catch (err: unknown) { const error = err as Error & { code?: string }; - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 
'GENERATE_ERROR'), error.message, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 'GENERATE_ERROR', + error.message, + requestId + ) + ), + }, + ], + }; } } ); } +// ── Runtime divergence detection ────────────────────────────────────────────── + +// Overrideable in tests so we don't touch the real ~/.sf directory +let sfConfigDirOverride: string | null = null; + +/** Exposed for testing only — override the directory that contains config.json. Pass null to reset. */ +export function setSfConfigDirForTesting(dir: string | null): void { + sfConfigDirOverride = dir; +} + +/** + * Returns the properties file path registered via `sf provar automation config load`, + * or null if the sf config cannot be read. + */ +function readActivePropertiesPath(): string | null { + try { + const sfDir = sfConfigDirOverride ?? path.join(os.homedir(), '.sf'); + const sfConfigPath = path.join(sfDir, 'config.json'); + if (!fs.existsSync(sfConfigPath)) return null; + const sfConfig = JSON.parse(fs.readFileSync(sfConfigPath, 'utf-8')) as Record; + return (sfConfig['PROVARDX_PROPERTIES_FILE_PATH'] as string | undefined) ?? null; + } catch { + return null; + } +} + +/** + * Compare two properties objects on the keys most likely to cause silent bugs. + * Returns a human-readable description of any divergent keys, or null if all match. 
+ */ +function buildDivergenceWarning( + diskPath: string, + diskContent: Record, + activePath: string, + activeContent: Record +): string | null { + const KEY_FIELDS = ['provarHome', 'projectPath', 'resultsPath']; + const divergent = KEY_FIELDS.filter((k) => JSON.stringify(diskContent[k]) !== JSON.stringify(activeContent[k])); + if (divergent.length === 0) return null; + const details = divergent + .map((k) => `${k}: disk="${String(diskContent[k])}" vs active="${String(activeContent[k])}"`) + .join(', '); + return ( + `The file you read (${diskPath}) differs from the active sf config (${activePath}) on: ${details}. ` + + 'Test runs use the active config values — run provar.automation.config.load with the correct file to sync.' + ); +} + // ── provar.properties.read ──────────────────────────────────────────────────── export function registerPropertiesRead(server: McpServer, config: ServerConfig): void { @@ -189,7 +334,21 @@ export function registerPropertiesRead(server: McpServer, config: ServerConfig): const resolved = path.resolve(file_path); if (!fs.existsSync(resolved)) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_NOT_FOUND', `File not found: ${resolved}`, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'PROPERTIES_FILE_NOT_FOUND', + `Properties file not found: ${resolved}. 
Use provar.properties.generate to create it.`, + requestId + ) + ), + }, + ], + }; } const raw = fs.readFileSync(resolved, 'utf-8'); @@ -197,17 +356,55 @@ export function registerPropertiesRead(server: McpServer, config: ServerConfig): try { parsed = JSON.parse(raw) as Record; } catch { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('MALFORMED_JSON', 'File is not valid JSON', requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('MALFORMED_JSON', 'File is not valid JSON', requestId)), + }, + ], + }; } - const response = { requestId, file_path: resolved, content: parsed }; + // Check whether the file being read matches what's registered as active in the sf config. + // If they differ on critical fields, surface a warning so the agent doesn't silently use stale values. + let divergenceWarning: string | undefined; + const activePath = readActivePropertiesPath(); + if (activePath && path.resolve(activePath) !== resolved) { + try { + // Guard: only read the active file if it is within the session's allowed paths. + assertPathAllowed(activePath, config.allowedPaths); + const activeContent = JSON.parse(fs.readFileSync(activePath, 'utf-8')) as Record; + divergenceWarning = buildDivergenceWarning(resolved, parsed, activePath, activeContent) ?? undefined; + } catch { + // Ignore — active path may be outside allowed paths or unreadable + } + } + + const response: Record = { requestId, file_path: resolved, content: parsed }; + if (divergenceWarning) response['details'] = { warning: divergenceWarning }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response, }; } catch (err: unknown) { const error = err as Error & { code?: string }; - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 
'READ_ERROR'), error.message, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 'READ_ERROR', + error.message, + requestId + ) + ), + }, + ], + }; } } ); @@ -215,37 +412,64 @@ export function registerPropertiesRead(server: McpServer, config: ServerConfig): // ── provar.properties.set ───────────────────────────────────────────────────── -const updatesSchema = z.object({ - provarHome: z.string().optional().describe('Path to Provar installation directory'), - projectPath: z.string().optional().describe('Path to the Provar test project root'), - resultsPath: z.string().optional().describe('Path where test results will be written'), - resultsPathDisposition: z.enum(['Increment', 'Replace', 'Fail']).optional().describe('What to do if results path already exists'), - testOutputLevel: z.enum(['BASIC', 'DETAILED', 'DIAGNOSTIC']).optional().describe('Amount of test output logged'), - pluginOutputlevel: z.enum(['SEVERE', 'WARNING', 'INFO', 'FINE', 'FINER', 'FINEST']).optional().describe('Amount of plugin output logged'), - stopOnError: z.boolean().optional().describe('Abort test run on first failure'), - excludeCallable: z.boolean().optional().describe('Omit callable test cases from execution'), - testprojectSecrets: z.string().optional().describe( - 'Encryption key (password string) used to decrypt the .secrets file in the Provar project root. ' + - 'This is the key itself — NOT a file path. Omit this field unless your project uses secrets encryption.' 
- ), - environment: z.object({ - testEnvironment: z.string().optional().describe('Name of the test environment to run against'), - webBrowser: z.enum(['Chrome', 'Safari', 'Edge', 'Edge_Legacy', 'Firefox', 'IE', 'Chrome_Headless']).optional(), - webBrowserConfig: z.string().optional(), - webBrowserProviderName: z.string().optional(), - webBrowserDeviceName: z.string().optional(), - }).optional().describe('Test execution environment settings'), - metadata: z.object({ - metadataLevel: z.enum(['Reuse', 'Reload', 'Refresh']).optional().describe('Salesforce metadata cache strategy'), - cachePath: z.string().optional().describe('Path for the metadata cache'), - }).optional().describe('Salesforce metadata settings'), - testCase: z.array(z.string()).optional().describe('Specific test case file paths to run (relative to projectPath/tests/). NOTE: data-driven iteration does NOT work in this mode — data table variables resolve as null. To run data-driven tests, add the test case to a plan with provar.testplan.add-instance and run via testPlan instead.'), - testPlan: z.array(z.string()).optional().describe('Test plan names to run (wildcards permitted)'), - connectionOverride: z.array(z.object({ - connection: z.string().describe('Provar connection name'), - username: z.string().describe('SFDX username or alias to substitute'), - })).optional().describe('Override Provar connections with SFDX usernames'), -}).describe('Fields to update in the properties file — only provided fields are changed'); +const updatesSchema = z + .object({ + provarHome: z.string().optional().describe('Path to Provar installation directory'), + projectPath: z.string().optional().describe('Path to the Provar test project root'), + resultsPath: z.string().optional().describe('Path where test results will be written'), + resultsPathDisposition: z + .enum(['Increment', 'Replace', 'Fail']) + .optional() + .describe('What to do if results path already exists'), + testOutputLevel: z.enum(['BASIC', 'DETAILED', 
'DIAGNOSTIC']).optional().describe('Amount of test output logged'), + pluginOutputlevel: z + .enum(['SEVERE', 'WARNING', 'INFO', 'FINE', 'FINER', 'FINEST']) + .optional() + .describe('Amount of plugin output logged'), + stopOnError: z.boolean().optional().describe('Abort test run on first failure'), + excludeCallable: z.boolean().optional().describe('Omit callable test cases from execution'), + testprojectSecrets: z + .string() + .optional() + .describe( + 'Encryption key (password string) used to decrypt the .secrets file in the Provar project root. ' + + 'This is the key itself — NOT a file path. Omit this field unless your project uses secrets encryption.' + ), + environment: z + .object({ + testEnvironment: z.string().optional().describe('Name of the test environment to run against'), + webBrowser: z.enum(['Chrome', 'Safari', 'Edge', 'Edge_Legacy', 'Firefox', 'IE', 'Chrome_Headless']).optional(), + webBrowserConfig: z.string().optional(), + webBrowserProviderName: z.string().optional(), + webBrowserDeviceName: z.string().optional(), + }) + .optional() + .describe('Test execution environment settings'), + metadata: z + .object({ + metadataLevel: z.enum(['Reuse', 'Reload', 'Refresh']).optional().describe('Salesforce metadata cache strategy'), + cachePath: z.string().optional().describe('Path for the metadata cache'), + }) + .optional() + .describe('Salesforce metadata settings'), + testCase: z + .array(z.string()) + .optional() + .describe( + 'Specific test case file paths to run (relative to projectPath/tests/). NOTE: data-driven iteration does NOT work in this mode — data table variables resolve as null. To run data-driven tests, add the test case to a plan with provar.testplan.add-instance and run via testPlan instead.' 
+ ), + testPlan: z.array(z.string()).optional().describe('Test plan names to run (wildcards permitted)'), + connectionOverride: z + .array( + z.object({ + connection: z.string().describe('Provar connection name'), + username: z.string().describe('SFDX username or alias to substitute'), + }) + ) + .optional() + .describe('Override Provar connections with SFDX usernames'), + }) + .describe('Fields to update in the properties file — only provided fields are changed'); export function registerPropertiesSet(server: McpServer, config: ServerConfig): void { server.tool( @@ -270,14 +494,36 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): const resolved = path.resolve(file_path); if (!fs.existsSync(resolved)) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_NOT_FOUND', `File not found: ${resolved}. Use provar.properties.generate to create it first.`, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'PROPERTIES_FILE_NOT_FOUND', + `File not found: ${resolved}. 
Use provar.properties.generate to create it first.`, + requestId + ) + ), + }, + ], + }; } let current: Record; try { current = JSON.parse(fs.readFileSync(resolved, 'utf-8')) as Record; } catch { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('MALFORMED_JSON', 'Existing file is not valid JSON', requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('MALFORMED_JSON', 'Existing file is not valid JSON', requestId)), + }, + ], + }; } const updatesRecord = updates as Record; @@ -293,7 +539,21 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): }; } catch (err: unknown) { const error = err as Error & { code?: string }; - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 'SET_ERROR'), error.message, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 
'SET_ERROR', + error.message, + requestId + ) + ), + }, + ], + }; } } ); @@ -318,7 +578,15 @@ export function registerPropertiesValidate(server: McpServer, config: ServerConf log('info', 'provar.properties.validate', { requestId, file_path }); if (!file_path && !content) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('MISSING_INPUT', 'Provide either file_path or content', requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('MISSING_INPUT', 'Provide either file_path or content', requestId)), + }, + ], + }; } try { @@ -328,7 +596,17 @@ export function registerPropertiesValidate(server: McpServer, config: ServerConf assertPathAllowed(file_path, config.allowedPaths); const resolved = path.resolve(file_path); if (!fs.existsSync(resolved)) { - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_NOT_FOUND', `File not found: ${resolved}`, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('PROPERTIES_FILE_NOT_FOUND', `File not found: ${resolved}`, requestId) + ), + }, + ], + }; } rawJson = fs.readFileSync(resolved, 'utf-8'); } else { @@ -339,7 +617,14 @@ export function registerPropertiesValidate(server: McpServer, config: ServerConf try { parsed = JSON.parse(rawJson) as Record; } catch { - const response = { requestId, is_valid: false, error_count: 1, warning_count: 0, errors: [{ field: '(root)', message: 'File is not valid JSON', severity: 'error' }], warnings: [] }; + const response = { + requestId, + is_valid: false, + error_count: 1, + warning_count: 0, + errors: [{ field: '(root)', message: 'File is not valid JSON', severity: 'error' }], + warnings: [], + }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } @@ -361,7 +646,21 @@ export function registerPropertiesValidate(server: 
McpServer, config: ServerConf }; } catch (err: unknown) { const error = err as Error & { code?: string }; - return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 'VALIDATE_ERROR'), error.message, requestId)) }] }; + return { + isError: true, + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 'VALIDATE_ERROR', + error.message, + requestId + ) + ), + }, + ], + }; } } ); diff --git a/src/mcp/tools/qualityHubTools.ts b/src/mcp/tools/qualityHubTools.ts index 223dc7d..a1b8120 100644 --- a/src/mcp/tools/qualityHubTools.ts +++ b/src/mcp/tools/qualityHubTools.ts @@ -12,13 +12,21 @@ import { makeError, makeRequestId } from '../schemas/common.js'; import { log } from '../logging/logger.js'; import { runSfCommand } from './sfSpawn.js'; - -function handleSpawnError(err: unknown, requestId: string, toolName: string): { isError: true; content: Array<{ type: 'text'; text: string }> } { +function handleSpawnError( + err: unknown, + requestId: string, + toolName: string +): { isError: true; content: Array<{ type: 'text'; text: string }> } { const error = err as Error & { code?: string }; log('error', `${toolName} failed`, { requestId, error: error.message }); return { isError: true as const, - content: [{ type: 'text' as const, text: JSON.stringify(makeError(error.code ?? 'SF_ERROR', error.message, requestId, false)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError(error.code ?? 'SF_ERROR', error.message, requestId, false)), + }, + ], }; } @@ -30,7 +38,11 @@ export function registerQualityHubConnect(server: McpServer): void { 'Connect to a Provar Quality Hub org. 
Invokes `sf provar quality-hub connect` with the supplied flags.', { target_org: z.string().describe('SF org alias or username to connect as'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags to forward (e.g. ["--json"])'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Additional raw CLI flags to forward (e.g. ["--json"])'), }, ({ target_org, flags }) => { const requestId = makeRequestId(); @@ -41,7 +53,15 @@ export function registerQualityHubConnect(server: McpServer): void { const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_CONNECT_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_CONNECT_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; @@ -74,7 +94,15 @@ export function registerQualityHubDisplay(server: McpServer): void { const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_DISPLAY_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_DISPLAY_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; @@ -87,24 +115,65 @@ export function registerQualityHubDisplay(server: McpServer): void { // ── Tool: provar.qualityhub.testrun 
────────────────────────────────────────── +function detectWildcardFlags(flags: string[]): string | undefined { + for (let i = 0; i < flags.length - 1; i++) { + if (flags[i] === '--plan-name') { + const value = flags[i + 1]; + if (value.includes('*') || value.includes('?')) { + return ( + `Wildcard testPlan scope detected ("${value}"). ` + + 'QH plan-level reporting will be skipped. ' + + 'Use exact plan names for QH plan reporting.' + ); + } + } + } + return undefined; +} + export function registerQualityHubTestRun(server: McpServer): void { server.tool( 'provar.qualityhub.testrun', - 'Trigger a Quality Hub test run. Invokes `sf provar quality-hub test run`.', + 'Trigger a Quality Hub test run. Invokes `sf provar quality-hub test run`. ' + + 'Warning: wildcard characters (* or ?) in flag values will cause QH plan-level reporting to be skipped — use exact plan names.', { target_org: z.string().describe('SF org alias or username'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags (e.g. ["--plan-name", "SmokeTests"])'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe( + 'Additional raw CLI flags (e.g. ["--plan-name", "SmokeTests"]). Avoid wildcards in --plan-name values — they skip QH plan-level reporting.' 
+ ), }, ({ target_org, flags }) => { const requestId = makeRequestId(); log('info', 'provar.qualityhub.testrun', { requestId, target_org }); try { + const wildcardWarning = detectWildcardFlags(flags); const result = runSfCommand(['provar', 'quality-hub', 'test', 'run', '--target-org', target_org, ...flags]); - const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; + const response: Record = { + requestId, + exitCode: result.exitCode, + stdout: result.stdout, + stderr: result.stderr, + }; + + if (wildcardWarning) { + response['details'] = { warning: wildcardWarning }; + } if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_TESTRUN_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_TESTRUN_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; @@ -132,15 +201,29 @@ export function registerQualityHubTestRunReport(server: McpServer): void { try { const result = runSfCommand([ - 'provar', 'quality-hub', 'test', 'run', 'report', - '--target-org', target_org, - '--run-id', run_id, + 'provar', + 'quality-hub', + 'test', + 'run', + 'report', + '--target-org', + target_org, + '--run-id', + run_id, ...flags, ]); const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_REPORT_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_REPORT_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } 
const failureStatuses = new Set(['FAIL', 'FAILED']); @@ -187,15 +270,29 @@ export function registerQualityHubTestRunAbort(server: McpServer): void { try { const result = runSfCommand([ - 'provar', 'quality-hub', 'test', 'run', 'abort', - '--target-org', target_org, - '--run-id', run_id, + 'provar', + 'quality-hub', + 'test', + 'run', + 'abort', + '--target-org', + target_org, + '--run-id', + run_id, ...flags, ]); const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_ABORT_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_ABORT_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; @@ -214,18 +311,38 @@ export function registerQualityHubTestcaseRetrieve(server: McpServer): void { 'Retrieve Quality Hub test cases by user story or component. Invokes `sf provar quality-hub testcase retrieve`.', { target_org: z.string().describe('SF org alias or username'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags (e.g. ["--user-story", "US-123"])'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Additional raw CLI flags (e.g. 
["--user-story", "US-123"])'), }, ({ target_org, flags }) => { const requestId = makeRequestId(); log('info', 'provar.qualityhub.testcase.retrieve', { requestId, target_org }); try { - const result = runSfCommand(['provar', 'quality-hub', 'testcase', 'retrieve', '--target-org', target_org, ...flags]); + const result = runSfCommand([ + 'provar', + 'quality-hub', + 'testcase', + 'retrieve', + '--target-org', + target_org, + ...flags, + ]); const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('QH_RETRIEVE_FAILED', result.stderr || result.stdout, requestId)) }] }; + return { + isError: true as const, + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('QH_RETRIEVE_FAILED', result.stderr || result.stdout, requestId)), + }, + ], + }; } return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; diff --git a/src/mcp/tools/rcaTools.ts b/src/mcp/tools/rcaTools.ts index 3202965..564b336 100644 --- a/src/mcp/tools/rcaTools.ts +++ b/src/mcp/tools/rcaTools.ts @@ -12,6 +12,8 @@ import path from 'node:path'; import { z } from 'zod'; import { XMLParser } from 'fast-xml-parser'; import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; +import type { ServerConfig } from '../server.js'; +import { assertPathAllowed, PathPolicyError } from '../security/pathPolicy.js'; import { makeError, makeRequestId } from '../schemas/common.js'; import { log } from '../logging/logger.js'; @@ -665,17 +667,23 @@ function buildFailureReports( // ── provar.testrun.rca tool ─────────────────────────────────────────────────── -export function registerTestRunRca(server: McpServer): void { +export function registerTestRunRca(server: McpServer, config: ServerConfig): void { server.tool( 'provar.testrun.rca', [ 'Parse a completed Provar test run 
and produce a structured Root Cause Analysis (RCA) report.', 'Resolves the results directory, parses JUnit.xml, classifies each failure by category,', 'and produces recommendations. Use locate_only=true to skip parsing and just resolve artifact locations.', + 'Use mode="failures" to get a lightweight array of failed test cases', + '([{ testItemId, title, errorMessage }]) without the full RCA classification — useful when you', + 'need failure names quickly without loading the HTML report.', ].join(' '), { project_path: z.string().describe('Absolute path to the Provar project root'), - results_path: z.string().optional().describe('Explicit override for the results base directory'), + results_path: z + .string() + .optional() + .describe('Explicit override for the results base directory; must be within --allowed-paths if provided'), run_index: z .number() .int() @@ -687,12 +695,33 @@ export function registerTestRunRca(server: McpServer): void { .optional() .default(false) .describe('If true, skip parsing and return just artifact locations'), + mode: z + .enum(['rca', 'failures']) + .optional() + .default('rca') + .describe( + '"rca" (default): full root-cause analysis with classification and recommendations. ' + + '"failures": lightweight array of failed test cases [{ testItemId, title, errorMessage }].' + ), }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.testrun.rca', { requestId, locate_only: input.locate_only }); + log('info', 'provar.testrun.rca', { requestId, locate_only: input.locate_only, mode: input.mode }); try { + // ── Path policy ────────────────────────────────────────────────────── + const pathsToCheck: string[] = [path.resolve(input.project_path)]; + if (input.results_path) pathsToCheck.push(input.results_path); + for (const p of pathsToCheck) { + try { + assertPathAllowed(p, config.allowedPaths); + } catch (pErr: unknown) { + const e = pErr as Error & { code?: string }; + const err = makeError(e instanceof PathPolicyError ? 
e.code : 'PATH_NOT_ALLOWED', e.message, requestId); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + } + // ── Resolve location ───────────────────────────────────────────────── const resolved = resolveResultsLocation(input.project_path, input.results_path, input.run_index); if ('error' in resolved) { @@ -741,6 +770,30 @@ export function registerTestRunRca(server: McpServer): void { }; } + // ── mode=failures: lightweight failure list ────────────────────────── + if (input.mode === 'failures') { + if (!junit_xml) { + const result = { + requestId, + results_dir, + failures: [] as Array<{ testItemId: string; title: string; errorMessage: string }>, + details: { warning: 'JUnit.xml not found in results directory — no failure data available' }, + }; + return { content: [{ type: 'text' as const, text: JSON.stringify(result) }] }; + } + const xmlContent = fs.readFileSync(junit_xml, 'utf-8'); + const parsed = parseJUnit(xmlContent); + const failures = parsed.testcases + .filter((tc) => tc.failureText !== null && !tc.isSkipped) + .map((tc) => ({ + testItemId: tc.name, + title: tc.name, + errorMessage: (tc.failureText ?? 
'').slice(0, 500), + })); + const result = { requestId, results_dir, failures }; + return { content: [{ type: 'text' as const, text: JSON.stringify(result) }] }; + } + // ── Check JUnit.xml exists ─────────────────────────────────────────── if (!junit_xml) { const result = { @@ -809,7 +862,7 @@ export function registerTestRunRca(server: McpServer): void { // ── Registration entry point ────────────────────────────────────────────────── -export function registerAllRcaTools(server: McpServer): void { +export function registerAllRcaTools(server: McpServer, config: ServerConfig): void { registerTestRunLocate(server); - registerTestRunRca(server); + registerTestRunRca(server, config); } diff --git a/src/mcp/tools/testCaseGenerate.ts b/src/mcp/tools/testCaseGenerate.ts index d4ec39f..e053cf9 100644 --- a/src/mcp/tools/testCaseGenerate.ts +++ b/src/mcp/tools/testCaseGenerate.ts @@ -99,6 +99,9 @@ const StepSchema = z.object({ const TOOL_DESCRIPTION = [ 'Generate a Provar XML test case skeleton with proper UUID v4 guids, sequential testItemId values, and structure.', 'Returns XML content. Writes to disk only when dry_run=false.', + 'URI-aware generation: use target_uri to control the XML nesting structure.', + ' - sf:ui:target (or omit target_uri) → flat Salesforce XML structure (existing behaviour).', + ' - ui:pageobject:target?pageId=pageobjects.PageClass → wraps all steps in a UiWithScreen element targeting that non-SF page object.', 'API IDs: shorthand forms (e.g. UiConnect, ApexSoqlQuery) are automatically expanded to fully-qualified IDs required by the Provar runtime.', 'Step arguments: attributes are emitted as — the only format the Provar runtime processes.', 'Shorthand XML attributes on are silently ignored at runtime; always supply arguments via the attributes map.', @@ -108,7 +111,7 @@ const TOOL_DESCRIPTION = [ 'ApexReadObject requires field names in attributes; omitting them produces MALFORMED_QUERY. 
Prefer ApexSoqlQuery.', 'AssertValues on SOQL results: index paths like "ResultList[0].Field" are not supported.', 'Use ForEach to iterate the result list, or SetValues to extract a field into a variable first.', - 'Validation: the response always includes a validation field with is_valid, validity_score, quality_score, and any structural issues — check this before attempting to run the test case.', + 'Validation: when validate_after_edit=true (default) the response includes a validation field and returns TESTCASE_INVALID if the generated XML fails structural checks.', 'Grounding: call provar.qualityhub.examples.retrieve before generating to get corpus examples for the scenario — correct XML structure for the step types you need.', ].join(' '); @@ -120,9 +123,26 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig test_case_name: z.string().describe('Test case name (human-readable label)'), test_case_id: z.string().optional().describe('Explicit test case id; auto-generated UUID v4 if omitted'), steps: z.array(StepSchema).default([]).describe('Ordered list of test steps'), + target_uri: z + .string() + .optional() + .describe( + 'Page object URI that determines the XML nesting structure. ' + + 'Omit or use "sf:ui:target" for Salesforce targets (flat structure). ' + + 'Use "ui:pageobject:target?pageId=pageobjects.PageClass" for non-SF page objects — ' + + 'steps are wrapped in a UiWithScreen element targeting that class.' + ), output_path: z.string().optional().describe('Suggested file path for the .xml file (returned in response)'), overwrite: z.boolean().default(false).describe('Overwrite if output_path file already exists'), dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path'), + validate_after_edit: z + .boolean() + .default(true) + .describe( + 'Run structural validation after generation (default: true). 
' + + 'Returns TESTCASE_INVALID error if the generated XML fails validation. ' + + 'Set false to skip validation and omit the validation field from the response.' + ), idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), }, (input) => { @@ -131,6 +151,7 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig requestId, test_case_name: input.test_case_name, dry_run: input.dry_run, + target_uri: input.target_uri, }); try { @@ -157,16 +178,8 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig } const warnings = buildStepWarnings(input.steps); - const validationFull = validateTestCase(xmlContent, input.test_case_name); - const validationSlim = { - is_valid: validationFull.is_valid, - validity_score: validationFull.validity_score, - quality_score: validationFull.quality_score, - error_count: validationFull.error_count, - warning_count: validationFull.warning_count, - issues: validationFull.issues, - }; - const result = { + const runValidation = input.validate_after_edit !== false; + const baseResult = { requestId, xml_content: xmlContent, file_path: filePath, @@ -174,12 +187,40 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig dry_run: input.dry_run, step_count: input.steps.length, idempotency_key: input.idempotency_key, - validation: validationSlim, ...(warnings.length > 0 ? 
{ warnings } : {}), }; + + if (runValidation) { + const validationFull = validateTestCase(xmlContent, input.test_case_name); + const validationSlim = { + is_valid: validationFull.is_valid, + validity_score: validationFull.validity_score, + quality_score: validationFull.quality_score, + error_count: validationFull.error_count, + warning_count: validationFull.warning_count, + issues: validationFull.issues, + }; + if (!validationFull.is_valid) { + const errResult = makeError( + 'TESTCASE_INVALID', + `Generated test case failed structural validation (${validationFull.error_count} error(s)). See details.validation.`, + requestId, + false, + { validation: validationSlim } + ); + log('warn', 'provar.testcase.generate: TESTCASE_INVALID', { requestId }); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; + } + const result = { ...baseResult, validation: validationSlim }; + return { + content: [{ type: 'text' as const, text: JSON.stringify(result) }], + structuredContent: result, + }; + } + return { - content: [{ type: 'text' as const, text: JSON.stringify(result) }], - structuredContent: result, + content: [{ type: 'text' as const, text: JSON.stringify(baseResult) }], + structuredContent: baseResult, }; } catch (err: unknown) { const error = err as Error & { code?: string }; @@ -198,59 +239,92 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig // ── XML builder ─────────────────────────────────────────────────────────────── -function buildArgumentsXml(attributes: Record): string { +function buildArgumentsXml(attributes: Record, baseIndent = ' '): string { const entries = Object.entries(attributes); if (entries.length === 0) return ''; const argLines = entries .map( ([k, v]) => - ` \n` + - ` ${escapeXmlContent(v)}\n` + - ' ' + `${baseIndent}\n` + + `${baseIndent} ${escapeXmlContent(v)}\n` + + `${baseIndent}` ) .join('\n'); - return `\n \n${argLines}\n \n `; + return 
`\n${baseIndent}\n${argLines}\n${baseIndent}\n${baseIndent.slice(0, -2)}`; +} + +function buildFlatStepXml( + step: { api_id: string; name: string; attributes: Record }, + testItemId: number, + indent: string +): string { + const guid = randomUUID(); + const resolvedApiId = resolveApiId(step.api_id); + const argumentsXml = buildArgumentsXml(step.attributes, indent + ' '); + if (argumentsXml) { + return ( + `${indent}${argumentsXml}` + ); + } + return ( + `${indent}` + ); } function buildTestCaseXml(input: { test_case_name: string; test_case_id?: string; steps: Array<{ api_id: string; name: string; attributes: Record }>; + target_uri?: string; }): string { const testCaseId = input.test_case_id ?? randomUUID(); const testCaseGuid = randomUUID(); const registryId = randomUUID(); - const stepLines = input.steps - .map((step, i) => { - const guid = randomUUID(); - const testItemId = i + 1; - const resolvedApiId = resolveApiId(step.api_id); - const argumentsXml = buildArgumentsXml(step.attributes); - if (argumentsXml) { - return ( - ` ${argumentsXml}` - ); - } - return ( - ` ` - ); - }) - .join('\n'); + let stepLines: string; + const isNonSf = !!input.target_uri && input.target_uri.startsWith('ui:'); + + if (isNonSf && input.target_uri) { + stepLines = buildUiWithScreenXml(input.steps, input.target_uri); + } else { + const lines = input.steps.map((step, i) => buildFlatStepXml(step, i + 1, ' ')).join('\n'); + stepLines = lines || ' '; + } return ( '\n' + `\n` + ' \n' + - (stepLines || ' ') + + stepLines + '\n \n' + '\n' ); } +function buildUiWithScreenXml( + steps: Array<{ api_id: string; name: string; attributes: Record }>, + targetUri: string +): string { + const wrapperGuid = randomUUID(); + const wrapperApiId = resolveApiId('UiWithScreen'); + // Inner steps use testItemIds starting at 3; the substeps clause itself occupies testItemId=2 + const innerLines = steps.map((step, i) => buildFlatStepXml(step, i + 3, ' ')).join('\n'); + const stepsContent = innerLines ? 
`\n${innerLines}\n      ` : '';
+  const clausesXml =
+    '\n    \n' +
+    '      \n' +
+    `        ${stepsContent}\n` +
+    '      \n' +
+    '    \n  ';
+  return (
+    `    ${buildArgumentsXml({ target: targetUri }).trimEnd()}${clausesXml}`
+  );
+}
+
 function escapeXmlAttr(value: string): string {
   return value.replace(/&/g, '&amp;').replace(/"/g, '&quot;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
 }
diff --git a/src/mcp/tools/testCaseStepTools.ts b/src/mcp/tools/testCaseStepTools.ts
new file mode 100644
index 0000000..7ba6e08
--- /dev/null
+++ b/src/mcp/tools/testCaseStepTools.ts
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2024 Provar Limited.
+ * All rights reserved.
+ * Licensed under the BSD 3-Clause license.
+ * For full license text, see LICENSE.md file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+ */
+
+/* eslint-disable camelcase */
+import fs from 'node:fs';
+import path from 'node:path';
+import { z } from 'zod';
+import { XMLParser, XMLBuilder } from 'fast-xml-parser';
+import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
+import type { ServerConfig } from '../server.js';
+import { assertPathAllowed, PathPolicyError } from '../security/pathPolicy.js';
+import { makeError, makeRequestId } from '../schemas/common.js';
+import { log } from '../logging/logger.js';
+import { validateTestCase } from './testCaseValidate.js';
+
+// ── XML parse / build config ──────────────────────────────────────────────────
+
+const PARSER_OPTIONS = {
+  ignoreAttributes: false,
+  attributeNamePrefix: '@_',
+  parseAttributeValue: false,
+  isArray: (tagName: string): boolean => tagName === 'apiCall',
+};
+
+const BUILDER_OPTIONS = {
+  ignoreAttributes: false,
+  attributeNamePrefix: '@_',
+  format: true,
+  indentBy: '  ',
+  suppressEmptyNode: false,
+};
+
+type ApiCallNode = Record<string, unknown>;
+
+// ── Helpers ───────────────────────────────────────────────────────────────────
+
+function parseTestCaseXml(xmlContent: string): Record<string, unknown> {
+  const parser = new XMLParser(PARSER_OPTIONS);
+  return parser.parse(xmlContent) as Record<string, unknown>;
+}
+
+function buildTestCaseXml(parsed: Record<string, unknown>): string {
+  const builder = new XMLBuilder(BUILDER_OPTIONS);
+  return '<?xml version="1.0" encoding="UTF-8"?>\n' + (builder.build(parsed) as string);
+}
+
+function getApiCalls(parsed: Record<string, unknown>): ApiCallNode[] | null {
+  const tc = parsed['testCase'];
+  if (!tc || typeof tc !== 'object') return null;
+  const steps = (tc as Record<string, unknown>)['steps'];
+  if (!steps || typeof steps !== 'object') return null;
+  const calls = (steps as Record<string, unknown>)['apiCall'];
+  if (!Array.isArray(calls)) return null;
+  return calls as ApiCallNode[];
+}
+
+function collectAllTestItemIds(parsed: Record<string, unknown>): string[] {
+  const calls = getApiCalls(parsed);
+  if (!calls) return [];
+  return calls.map((c) => c['@_testItemId']).filter((id): id is string => typeof id === 'string');
+}
+
+function parseNewStep(stepXml: string): { step: ApiCallNode } | { error: string } {
+  try {
+    const fragParser = new XMLParser(PARSER_OPTIONS);
+    const fragDoc = fragParser.parse(`<root>${stepXml}</root>`) as Record<string, unknown>;
+    const rootEl = fragDoc['root'] as Record<string, unknown> | undefined;
+    const callEl = rootEl?.['apiCall'];
+    if (!callEl) return { error: 'step_xml must contain exactly one <apiCall> element' };
+    const calls = Array.isArray(callEl) ?
(callEl as ApiCallNode[]) : [callEl as ApiCallNode]; + if (calls.length !== 1) return { error: 'step_xml must contain exactly one element' }; + return { step: calls[0] }; + } catch (e: unknown) { + return { error: (e as Error).message }; + } +} + +// ── Tool registration ───────────────────────────────────────────────────────── + +export function registerTestCaseStepEdit(server: McpServer, config: ServerConfig): void { + server.tool( + 'provar.testcase.step.edit', + [ + 'Add or remove a single step (apiCall) in a Provar XML test case file.', + 'Uses write-to-temp-then-rename to minimise partial-write risk.', + 'Prerequisites: the test case must exist and be valid XML.', + 'For mode=remove: supply test_item_id of the step to remove.', + 'For mode=add: supply test_item_id of the anchor step, position (before|after, default after),', + 'and step_xml (the ... XML fragment for the new step; must contain exactly one ).', + 'A backup is written to .bak before any mutation and restored automatically if', + 'the post-edit validation fails.', + 'Returns STEP_NOT_FOUND (with all_test_item_ids list) when the target step is absent.', + 'Returns INVALID_STEP_XML when step_xml cannot be parsed or contains ≠1 elements.', + 'Returns INVALID_XML_AFTER_EDIT (backup restored) when the mutated file fails validation.', + ].join(' '), + { + test_case_path: z.string().describe('Absolute path to the .testcase XML file; must be within --allowed-paths'), + mode: z.enum(['remove', 'add']).describe('"remove" to delete a step; "add" to insert a new step'), + test_item_id: z + .string() + .describe('For mode=remove: testItemId of the step to delete. For mode=add: testItemId of the anchor step.'), + position: z + .enum(['before', 'after']) + .optional() + .default('after') + .describe('Where to insert relative to the anchor step (mode=add only; default: after)'), + step_xml: z + .string() + .optional() + .describe( + 'The ... XML fragment for the new step (mode=add only). 
Must be well-formed XML.' + ), + validate_after_edit: z + .boolean() + .optional() + .default(true) + .describe('Run provar.testcase.validate after the mutation; restores backup on failure (default: true)'), + }, + (input) => { + const requestId = makeRequestId(); + log('info', 'provar.testcase.step.edit', { requestId, mode: input.mode, test_item_id: input.test_item_id }); + + try { + const resolvedPath = path.resolve(input.test_case_path); + const bakPath = resolvedPath + '.bak'; + + // Path policy — validate both the target file and its backup path + assertPathAllowed(resolvedPath, config.allowedPaths); + assertPathAllowed(bakPath, config.allowedPaths); + + // Validate step_xml up-front before touching the file + let newStep: ApiCallNode | null = null; + if (input.mode === 'add') { + if (!input.step_xml) { + const err = makeError('MISSING_INPUT', 'step_xml is required for mode=add', requestId); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + const parsed_step = parseNewStep(input.step_xml); + if ('error' in parsed_step) { + const err = makeError('INVALID_STEP_XML', `step_xml parse error: ${parsed_step.error}`, requestId); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + newStep = parsed_step.step; + } + + // Read the test case file + if (!fs.existsSync(resolvedPath)) { + const err = makeError('FILE_NOT_FOUND', `Test case not found: ${resolvedPath}`, requestId); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + const original = fs.readFileSync(resolvedPath, 'utf-8'); + + // Parse + let parsed: Record; + try { + parsed = parseTestCaseXml(original); + } catch (e: unknown) { + const err = makeError('INVALID_XML', `Cannot parse test case: ${(e as Error).message}`, requestId); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + + const apiCalls = getApiCalls(parsed); + if 
(!apiCalls) { + const err = makeError( + 'INVALID_XML', + 'Test case XML does not contain a structure', + requestId + ); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + + // Find target step + const targetIndex = apiCalls.findIndex((c) => String(c['@_testItemId']) === input.test_item_id); + if (targetIndex === -1) { + const allIds = collectAllTestItemIds(parsed); + const err = makeError( + 'STEP_NOT_FOUND', + `Step with testItemId "${input.test_item_id}" not found in ${resolvedPath}`, + requestId, + false, + { all_test_item_ids: allIds } + ); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + + // Mutate the parsed tree + if (input.mode === 'remove') { + apiCalls.splice(targetIndex, 1); + } else { + // mode=add + const insertAt = input.position === 'before' ? targetIndex : targetIndex + 1; + apiCalls.splice(insertAt, 0, newStep as ApiCallNode); + } + + // Rebuild XML + const mutatedXml = buildTestCaseXml(parsed); + + // Write backup, then write mutated file via temp→rename to minimise partial-write risk + const tmpPath = resolvedPath + '.tmp'; + fs.writeFileSync(bakPath, original, 'utf-8'); + fs.writeFileSync(tmpPath, mutatedXml, 'utf-8'); + fs.renameSync(tmpPath, resolvedPath); + + // Validate if requested + let validation: ReturnType | null | undefined; + if (input.validate_after_edit) { + try { + validation = validateTestCase(mutatedXml, path.basename(resolvedPath, '.testcase')); + } catch { + // treat thrown validation errors as failures + validation = null; + } + + if (!validation || !validation.is_valid) { + // Restore from backup + fs.writeFileSync(resolvedPath, original, 'utf-8'); + fs.unlinkSync(bakPath); + const err = makeError( + 'INVALID_XML_AFTER_EDIT', + `Validation failed after ${input.mode}; original file restored from backup`, + requestId, + false, + { validation_issues: validation?.issues ?? 
['Validation threw an unexpected error'] } + ); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; + } + } + + // Success — delete backup + try { + fs.unlinkSync(bakPath); + } catch { + // non-fatal + } + + const result = { + requestId, + success: true, + test_item_id: input.test_item_id, + mode: input.mode, + ...(input.validate_after_edit && validation ? { validation } : {}), + }; + + return { + content: [{ type: 'text' as const, text: JSON.stringify(result) }], + }; + } catch (err: unknown) { + const error = err as Error & { code?: string }; + const errResult = makeError( + error instanceof PathPolicyError ? error.code : 'STEP_EDIT_ERROR', + error.message, + requestId + ); + log('error', 'provar.testcase.step.edit failed', { requestId, error: error.message }); + return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; + } + } + ); +} + +export function registerAllTestCaseStepTools(server: McpServer, config: ServerConfig): void { + registerTestCaseStepEdit(server, config); +} diff --git a/src/mcp/tools/testCaseValidate.ts b/src/mcp/tools/testCaseValidate.ts index b832047..607c9d3 100644 --- a/src/mcp/tools/testCaseValidate.ts +++ b/src/mcp/tools/testCaseValidate.ts @@ -170,6 +170,21 @@ export interface TestCaseValidationResult { validation_warning?: string; } +/** + * Reads a test case file from disk, validates it, and returns the result. + * Used by Wave 2 (testCaseGenerate) and Wave 3 (testCaseStepEdit) to validate + * after mutations without spawning a separate MCP tool call. + * Throws on path-policy violation or missing file. 
+ */ +export function validateTestCaseXml(filePath: string, config: ServerConfig): TestCaseValidationResult { + assertPathAllowed(filePath, config.allowedPaths); + const resolved = path.resolve(filePath); + if (!fs.existsSync(resolved)) { + throw Object.assign(new Error(`File not found: ${resolved}`), { code: 'TESTCASE_FILE_NOT_FOUND' }); + } + return validateTestCase(fs.readFileSync(resolved, 'utf-8')); +} + /** Pure function — exported for unit testing */ export function validateTestCase(xmlContent: string, testName?: string): TestCaseValidationResult { const issues: ValidationIssue[] = []; diff --git a/src/services/auth/credentials.ts b/src/services/auth/credentials.ts index e582f87..f8c9e5c 100644 --- a/src/services/auth/credentials.ts +++ b/src/services/auth/credentials.ts @@ -15,7 +15,7 @@ export interface StoredCredentials { prefix: string; set_at: string; source: 'manual' | 'cognito' | 'salesforce'; - // Phase 2 fields — optional so Phase 1 files remain valid after upgrade + // Extended fields — optional so earlier credential files remain valid after upgrade username?: string; tier?: string; expires_at?: string; diff --git a/test/unit/mcp/antTools.test.ts b/test/unit/mcp/antTools.test.ts index 80fa8f0..65fdce4 100644 --- a/test/unit/mcp/antTools.test.ts +++ b/test/unit/mcp/antTools.test.ts @@ -11,7 +11,7 @@ import fs from 'node:fs'; import os from 'node:os'; import path from 'node:path'; import { describe, it, beforeEach, afterEach } from 'mocha'; -import { registerAntGenerate, validateAntXml } from '../../../src/mcp/tools/antTools.js'; +import { registerAntGenerate, validateAntXml, parseJUnitResults } from '../../../src/mcp/tools/antTools.js'; import type { ServerConfig } from '../../../src/mcp/server.js'; // ── Minimal McpServer mock ───────────────────────────────────────────────────── @@ -52,7 +52,7 @@ function minimalInput(overrides: Record = {}): Record { it('includes for CompileTask', () => { const xml = getXml(); - assert.ok( - 
xml.includes('com.provar.testrunner.ant.CompileTask'), - 'Expected CompileTask taskdef' - ); + assert.ok(xml.includes('com.provar.testrunner.ant.CompileTask'), 'Expected CompileTask taskdef'); }); it('includes for RunnerTask', () => { const xml = getXml(); - assert.ok( - xml.includes('com.provar.testrunner.ant.RunnerTask'), - 'Expected RunnerTask taskdef' - ); + assert.ok(xml.includes('com.provar.testrunner.ant.RunnerTask'), 'Expected RunnerTask taskdef'); }); it('includes for TestCycleReportTask', () => { const xml = getXml(); - assert.ok( - xml.includes('com.provar.testrunner.ant.TestCycleReportTask'), - 'Expected TestCycleReportTask taskdef' - ); + assert.ok(xml.includes('com.provar.testrunner.ant.TestCycleReportTask'), 'Expected TestCycleReportTask taskdef'); }); it('includes step', () => { @@ -184,10 +175,7 @@ describe('provar.ant.generate', () => { it('sets the provar.home property to the provided provar_home value', () => { const customHome = path.join(tmpDir, 'custom-provar'); const xml = getXml({ provar_home: customHome }); - assert.ok( - xml.includes(``), - 'Expected provar.home property' - ); + assert.ok(xml.includes(``), 'Expected provar.home property'); }); it('renders webBrowser attribute', () => { @@ -433,10 +421,7 @@ describe('provar.ant.generate', () => { assert.equal(isError(result), true); const code = parseText(result)['error_code'] as string; - assert.ok( - code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', - `Unexpected error code: ${code}` - ); + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', `Unexpected error code: ${code}`); }); it('output_path is not checked in dry_run=true mode', () => { @@ -464,20 +449,14 @@ describe('provar.ant.generate', () => { assert.equal(isError(result), true); const code = parseText(result)['error_code'] as string; - assert.ok( - code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', - `Unexpected error code: ${code}` - ); + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 
'PATH_TRAVERSAL', `Unexpected error code: ${code}`); }); it('rejects project_path containing ".." (PATH_TRAVERSAL)', () => { const strictServer = new MockMcpServer(); registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call( - 'provar.ant.generate', - strictInput({ project_path: '../evil', dry_run: true }) - ); + const result = strictServer.call('provar.ant.generate', strictInput({ project_path: '../evil', dry_run: true })); assert.equal(isError(result), true); assert.equal(parseText(result)['error_code'], 'PATH_TRAVERSAL'); @@ -494,10 +473,7 @@ describe('provar.ant.generate', () => { assert.equal(isError(result), true); const code = parseText(result)['error_code'] as string; - assert.ok( - code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', - `Unexpected error code: ${code}` - ); + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', `Unexpected error code: ${code}`); }); it('rejects optional license_path outside allowedPaths when provided', () => { @@ -511,10 +487,7 @@ describe('provar.ant.generate', () => { assert.equal(isError(result), true); const code = parseText(result)['error_code'] as string; - assert.ok( - code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', - `Unexpected error code: ${code}` - ); + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', `Unexpected error code: ${code}`); }); }); }); @@ -545,20 +518,29 @@ describe('validateAntXml', () => { // No header — everything else is valid const noDecl = VALID_ANT.replace(/^<\?xml[^?]*\?>\n/, ''); const r = validateAntXml(noDecl); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_001'), 'Expected ANT_001 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_001'), + 'Expected ANT_001 warning' + ); // Still structurally valid — XML declaration is a warning only assert.equal(r.error_count, 0); }); it('ANT_002: flags malformed XML', () => { const r = validateAntXml(' i.rule_id === 'ANT_002'), 'Expected 
ANT_002 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_002'), + 'Expected ANT_002 error' + ); assert.equal(r.is_valid, false); }); it('ANT_003: flags wrong root element', () => { const r = validateAntXml(''); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_003'), 'Expected ANT_003 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_003'), + 'Expected ANT_003 error' + ); assert.equal(r.is_valid, false); }); }); @@ -567,7 +549,10 @@ describe('validateAntXml', () => { it('ANT_004: flags missing default attribute on ', () => { const xml = VALID_ANT.replace('default="runtests"', ''); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_004'), 'Expected ANT_004 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_004'), + 'Expected ANT_004 error' + ); }); it('ANT_005: flags missing CompileTask taskdef', () => { @@ -609,7 +594,10 @@ describe('validateAntXml', () => { it('ANT_006: flags default target not found', () => { const xml = VALID_ANT.replace('name="runtests"', 'name="something-else"'); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_006'), 'Expected ANT_006 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_006'), + 'Expected ANT_006 error' + ); }); }); @@ -627,17 +615,20 @@ describe('validateAntXml', () => { `; const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_010'), 'Expected ANT_010 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_010'), + 'Expected ANT_010 warning' + ); }); it('ANT_020: flags missing ', () => { // Replace the Run-Test-Case block with a noop comment - const xml = VALID_ANT.replace( - //, - '' - ); + const xml = VALID_ANT.replace(//, ''); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_020'), 'Expected ANT_020 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_020'), + 'Expected ANT_020 error' + ); }); }); @@ -656,7 +647,10 @@ 
describe('validateAntXml', () => { `; const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_021'), 'Expected ANT_021 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_021'), + 'Expected ANT_021 error' + ); }); it('ANT_022: flags missing projectPath', () => { @@ -673,13 +667,19 @@ describe('validateAntXml', () => { `; const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_022'), 'Expected ANT_022 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_022'), + 'Expected ANT_022 error' + ); }); it('ANT_023: flags missing resultsPath', () => { const xml = VALID_ANT.replace(' resultsPath="../ANT/Results"', ''); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_023'), 'Expected ANT_023 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_023'), + 'Expected ANT_023 error' + ); }); }); @@ -687,34 +687,37 @@ describe('validateAntXml', () => { it('ANT_030: warns about unrecognised webBrowser value', () => { const xml = VALID_ANT.replace('webBrowser="Chrome"', 'webBrowser="InternetExplorer6"'); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_030'), 'Expected ANT_030 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_030'), + 'Expected ANT_030 warning' + ); }); it('ANT_031: warns about unrecognised salesforceMetadataCache value', () => { - const xml = VALID_ANT.replace( - ' i.rule_id === 'ANT_031'), 'Expected ANT_031 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_031'), + 'Expected ANT_031 warning' + ); }); it('ANT_032: warns about unrecognised testOutputlevel value', () => { - const xml = VALID_ANT.replace( - ' i.rule_id === 'ANT_032'), 'Expected ANT_032 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_032'), + 'Expected ANT_032 warning' + ); }); it('ANT_033: warns about unrecognised resultsPathDisposition value', () => { - const xml = VALID_ANT.replace( - ' i.rule_id 
=== 'ANT_033'), 'Expected ANT_033 warning'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_033'), + 'Expected ANT_033 warning' + ); }); }); @@ -722,13 +725,19 @@ describe('validateAntXml', () => { it('ANT_040: flags Run-Test-Case with no children', () => { const xml = VALID_ANT.replace('', ''); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_040'), 'Expected ANT_040 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_040'), + 'Expected ANT_040 error' + ); }); it('ANT_041: flags a fileset missing dir attribute', () => { const xml = VALID_ANT.replace('', ''); const r = validateAntXml(xml); - assert.ok(r.issues.some((i) => i.rule_id === 'ANT_041'), 'Expected ANT_041 error'); + assert.ok( + r.issues.some((i) => i.rule_id === 'ANT_041'), + 'Expected ANT_041 error' + ); }); it('reports fileset_count correctly', () => { @@ -769,3 +778,69 @@ describe('validateAntXml', () => { }); }); }); + +// ── parseJUnitResults ───────────────────────────────────────────────────────── + +describe('parseJUnitResults', () => { + let junitTmpDir: string; + + beforeEach(() => { + junitTmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'junit-test-')); + }); + + afterEach(() => { + fs.rmSync(junitTmpDir, { recursive: true, force: true }); + }); + + it('returns warning when results directory does not exist', () => { + const result = parseJUnitResults(path.join(junitTmpDir, 'nonexistent')); + assert.deepEqual(result.steps, []); + assert.ok(result.warning?.includes('not found')); + }); + + it('returns warning when directory contains no XML files', () => { + const result = parseJUnitResults(junitTmpDir); + assert.deepEqual(result.steps, []); + assert.ok(result.warning?.includes('No JUnit XML')); + }); + + it('extracts steps from a bare JUnit file', () => { + const xml = + ''; + fs.writeFileSync(path.join(junitTmpDir, 'JUnit.xml'), xml); + const result = parseJUnitResults(junitTmpDir); + assert.equal(result.steps.length, 2); + 
assert.equal(result.steps[0].status, 'pass'); + assert.equal(result.steps[1].status, 'fail'); + assert.ok(result.steps[1].errorMessage?.includes('Element not found')); + assert.equal(result.warning, undefined); + }); + + it('extracts steps from a wrapper JUnit file', () => { + const xml = + ''; + fs.writeFileSync(path.join(junitTmpDir, 'JUnit.xml'), xml); + const result = parseJUnitResults(junitTmpDir); + assert.equal(result.steps.length, 1); + assert.equal(result.steps[0].status, 'skip'); + assert.equal(result.steps[0].title, 'TC1'); + }); + + it('returns warning when XML parses but contains no testcase elements', () => { + const xml = ''; + fs.writeFileSync(path.join(junitTmpDir, 'JUnit.xml'), xml); + const result = parseJUnitResults(junitTmpDir); + assert.deepEqual(result.steps, []); + assert.ok((result.warning?.length ?? 0) > 0); + }); + + it('combines message attribute and CDATA body in failure text', () => { + const xml = + ''; + fs.writeFileSync(path.join(junitTmpDir, 'JUnit.xml'), xml); + const result = parseJUnitResults(junitTmpDir); + assert.equal(result.steps.length, 1); + assert.ok(result.steps[0].errorMessage?.includes('Execution failed')); + assert.ok(result.steps[0].errorMessage?.includes('stack trace here')); + }); +}); diff --git a/test/unit/mcp/automationTools.test.ts b/test/unit/mcp/automationTools.test.ts index 9d16a99..110da3f 100644 --- a/test/unit/mcp/automationTools.test.ts +++ b/test/unit/mcp/automationTools.test.ts @@ -17,6 +17,7 @@ import { needsWindowsShell, setSfPlatformForTesting, filterTestRunOutput, + setSfResultsPathForTesting, } from '../../../src/mcp/tools/automationTools.js'; // ── Minimal mock server ─────────────────────────────────────────────────────── @@ -156,6 +157,76 @@ describe('automationTools', () => { const body = parseBody(result); assert.equal(body.output_lines_suppressed, undefined, 'output_lines_suppressed should be absent'); }); + + it('includes structured steps[] when JUnit XML is present in results dir', () => { 
+ const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'junit-test-')); + const junitXml = ` + + + + stack trace + + + + +`; + fs.writeFileSync(path.join(tmpDir, 'results.xml'), junitXml, 'utf-8'); + setSfResultsPathForTesting(tmpDir); + + try { + spawnStub.returns(makeSpawnResult('tests done', '', 0)); + const result = server.call('provar.automation.testrun', { flags: [] }); + const body = parseBody(result); + assert.ok(Array.isArray(body.steps), 'steps should be an array'); + const steps = body.steps as Array<{ testItemId: string; title: string; status: string; errorMessage?: string }>; + assert.equal(steps.length, 3); + assert.equal(steps[0].status, 'pass'); + assert.equal(steps[1].status, 'fail'); + assert.ok( + (steps[1].errorMessage as string).includes('Element not found'), + 'errorMessage should include the failure message' + ); + assert.equal(steps[2].status, 'skip'); + } finally { + setSfResultsPathForTesting(undefined); + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('omits steps[] and adds details.warning when results dir has no XML', () => { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'junit-empty-')); + setSfResultsPathForTesting(tmpDir); + + try { + spawnStub.returns(makeSpawnResult('tests done', '', 0)); + const result = server.call('provar.automation.testrun', { flags: [] }); + const body = parseBody(result); + assert.equal(body.steps, undefined, 'steps should be absent when no XML found'); + assert.ok(body.details, 'details should be present'); + const details = body.details as Record; + assert.ok(typeof details.warning === 'string', 'details.warning should be a string'); + } finally { + setSfResultsPathForTesting(undefined); + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('omits steps[] and adds details.warning when results dir contains malformed XML', () => { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'junit-bad-')); + fs.writeFileSync(path.join(tmpDir, 'results.xml'), '', 
'utf-8'); + setSfResultsPathForTesting(tmpDir); + + try { + spawnStub.returns(makeSpawnResult('tests done', '', 0)); + const result = server.call('provar.automation.testrun', { flags: [] }); + const body = parseBody(result); + assert.equal(body.steps, undefined, 'steps should be absent when XML is malformed'); + assert.ok(body.details, 'details should be present with warning'); + } finally { + setSfResultsPathForTesting(undefined); + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); }); // ── provar.automation.compile ───────────────────────────────────────────── diff --git a/test/unit/mcp/bestPracticesEngine.test.ts b/test/unit/mcp/bestPracticesEngine.test.ts index c4fcbb3..3e7ba1b 100644 --- a/test/unit/mcp/bestPracticesEngine.test.ts +++ b/test/unit/mcp/bestPracticesEngine.test.ts @@ -8,19 +8,11 @@ /* eslint-disable camelcase */ import { strict as assert } from 'node:assert'; import { describe, it } from 'mocha'; -import { - calculateBPScore, - runBestPractices, - type BPViolation, -} from '../../../src/mcp/tools/bestPracticesEngine.js'; +import { calculateBPScore, runBestPractices, type BPViolation } from '../../../src/mcp/tools/bestPracticesEngine.js'; // ── Helper: build a minimal violation ──────────────────────────────────────── -function makeViolation( - severity: BPViolation['severity'], - weight: number, - count?: number -): BPViolation { +function makeViolation(severity: BPViolation['severity'], weight: number, count?: number): BPViolation { return { rule_id: 'TEST-001', name: 'Test Violation', @@ -37,9 +29,9 @@ function makeViolation( // ── Valid XML fixture (passes all schema-level rules) ───────────────────────── -const GUID_TC = '550e8400-e29b-41d4-a716-446655440000'; -const GUID_S1 = '550e8400-e29b-41d4-a716-446655440011'; -const GUID_S2 = '550e8400-e29b-41d4-a716-446655440012'; +const GUID_TC = '550e8400-e29b-41d4-a716-446655440000'; +const GUID_S1 = '550e8400-e29b-41d4-a716-446655440011'; +const GUID_S2 = 
'550e8400-e29b-41d4-a716-446655440012'; const VALID_XML = ` @@ -97,10 +89,7 @@ describe('calculateBPScore', () => { // critical weight=5: 100 - 5 = 95 // major weight=4: 95 - 3 = 92 // net: 92 - const score = calculateBPScore([ - makeViolation('critical', 5), - makeViolation('major', 4), - ]); + const score = calculateBPScore([makeViolation('critical', 5), makeViolation('major', 4)]); assert.equal(score, 92); }); @@ -122,7 +111,7 @@ describe('runBestPractices', () => { describe('valid XML test case', () => { it('returns a quality_score between 0 and 100', () => { const result = runBestPractices(VALID_XML); - assert.ok(result.quality_score >= 0, `score ${result.quality_score} should be >= 0`); + assert.ok(result.quality_score >= 0, `score ${result.quality_score} should be >= 0`); assert.ok(result.quality_score <= 100, `score ${result.quality_score} should be <= 100`); }); @@ -166,4 +155,56 @@ describe('runBestPractices', () => { assert.equal(result.quality_score, recalculated); }); }); + + describe('uiWithScreenTarget validator', () => { + const GUID_UWS = '550e8400-e29b-41d4-a716-446655440020'; + const GUID_TC2 = '550e8400-e29b-41d4-a716-446655440030'; + + function buildUwsXml(target: string): string { + return ` + + + + + + ${target} + + + + +`; + } + + it('passes for a valid SF target URI (sf:ui:target?object=Account&action=view)', () => { + const result = runBestPractices(buildUwsXml('sf:ui:target?object=Account&action=view')); + const uwsViolation = result.violations.find( + (v) => v.rule_id.includes('UI-SCREEN') || v.message.includes('UiWithScreen') + ); + assert.ok(!uwsViolation, `Expected no uiWithScreenTarget violation, got: ${uwsViolation?.message}`); + }); + + it('passes for a valid page object target URI (ui:pageobject:target?pageId=pageobjects.LoginPage)', () => { + const result = runBestPractices(buildUwsXml('ui:pageobject:target?pageId=pageobjects.LoginPage')); + const uwsViolation = result.violations.find( + (v) => v.message.includes('UiWithScreen') 
|| v.message.includes('pageId') + ); + assert.ok(!uwsViolation, `Expected no uiWithScreenTarget violation, got: ${uwsViolation?.message}`); + }); + + it('fires for page object target in colon format (ui:pageobject:target:com.example.Class)', () => { + const result = runBestPractices(buildUwsXml('ui:pageobject:target:com.example.LoginPage')); + const uwsViolation = result.violations.find( + (v) => v.message.includes('colon format') || v.message.includes('UiWithScreen') + ); + assert.ok(uwsViolation, 'Expected uiWithScreenTarget violation for colon format URI'); + }); + + it('fires for page object target with pageId missing pageobjects. prefix', () => { + const result = runBestPractices(buildUwsXml('ui:pageobject:target?pageId=LoginPage')); + const uwsViolation = result.violations.find( + (v) => v.message.includes('pageobjects.') || v.message.includes('UiWithScreen') + ); + assert.ok(uwsViolation, 'Expected uiWithScreenTarget violation for missing pageobjects. prefix'); + }); + }); }); diff --git a/test/unit/mcp/connectionTools.test.ts b/test/unit/mcp/connectionTools.test.ts new file mode 100644 index 0000000..4da062f --- /dev/null +++ b/test/unit/mcp/connectionTools.test.ts @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2024 Provar Limited. + * All rights reserved. + * Licensed under the BSD 3-Clause license. 
+ * For full license text, see LICENSE.md file in the repo root or https://opensource.org/licenses/BSD-3-Clause + */ + +/* eslint-disable camelcase */ +import { strict as assert } from 'node:assert'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { describe, it, beforeEach, afterEach } from 'mocha'; +import { registerConnectionList } from '../../../src/mcp/tools/connectionTools.js'; +import type { ServerConfig } from '../../../src/mcp/server.js'; + +// ── Minimal McpServer mock ──────────────────────────────────────────────────── + +type ToolHandler = (args: Record) => unknown; + +class MockMcpServer { + private handlers = new Map(); + + public tool(name: string, _description: string, _schema: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + + public call(name: string, args: Record): ReturnType { + const h = this.handlers.get(name); + if (!h) throw new Error(`Tool not registered: ${name}`); + return h(args); + } +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +function parseText(result: unknown): Record { + const r = result as { content: Array<{ type: string; text: string }> }; + return JSON.parse(r.content[0].text) as Record; +} + +function isError(result: unknown): boolean { + return (result as { isError?: boolean }).isError === true; +} + +function writeTestProject(dir: string, content: string): void { + fs.writeFileSync(path.join(dir, '.testproject'), content, 'utf-8'); +} + +// ── .testproject fixture content ────────────────────────────────────────────── + +const BASIC_TEST_PROJECT = ` + + + + + + + + + + + + + + + + + + + + + + +`; + +const EMPTY_TEST_PROJECT = ` + + +`; + +// ── Test setup ──────────────────────────────────────────────────────────────── + +let tmpDir: string; +let server: MockMcpServer; +let config: ServerConfig; + +beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'conntools-test-')); + server = new 
MockMcpServer(); + config = { allowedPaths: [tmpDir] }; + registerConnectionList(server as never, config); +}); + +afterEach(() => { + fs.rmSync(tmpDir, { recursive: true, force: true }); +}); + +// ── provar.connection.list ──────────────────────────────────────────────────── + +describe('provar.connection.list', () => { + describe('happy path', () => { + it('returns connections array with name, type, and url', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + + assert.equal(isError(result), false); + const body = parseText(result); + const connections = body['connections'] as Array>; + assert.ok(Array.isArray(connections), 'Expected connections array'); + assert.equal(connections.length, 4, 'Expected 4 connections'); + }); + + it('maps sf class to Salesforce type', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const connections = parseText(result)['connections'] as Array>; + const sfConns = connections.filter((c) => c['type'] === 'Salesforce'); + assert.equal(sfConns.length, 2, 'Expected 2 Salesforce connections'); + assert.equal(sfConns[0]['name'], 'MyOrg'); + }); + + it('maps ui class to Web type', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const connections = parseText(result)['connections'] as Array>; + const webConns = connections.filter((c) => c['type'] === 'Web'); + assert.equal(webConns.length, 1); + assert.equal(webConns[0]['name'], 'Chrome'); + }); + + it('marks sso class connections as sso_configured=true', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const connections = parseText(result)['connections'] as Array>; + const ssoConn = connections.find((c) => c['name'] === 'OktaSso'); + 
assert.ok(ssoConn, 'Expected OktaSso connection'); + assert.equal(ssoConn['sso_configured'], true); + }); + + it('marks non-sso connections as sso_configured=false', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const connections = parseText(result)['connections'] as Array>; + const sfConn = connections.find((c) => c['name'] === 'MyOrg'); + assert.ok(sfConn); + assert.equal(sfConn['sso_configured'], false); + }); + + it('returns environments with name, connection, and url', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const environments = parseText(result)['environments'] as Array>; + assert.ok(Array.isArray(environments)); + assert.equal(environments.length, 2); + const qa = environments.find((e) => e['name'] === 'QA'); + assert.ok(qa); + assert.equal(qa['connection'], 'MyOrg'); + assert.equal(qa['url'], 'https://qa.example.com'); + }); + + it('returns environment without url when not present', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const environments = parseText(result)['environments'] as Array>; + const uat = environments.find((e) => e['name'] === 'UAT'); + assert.ok(uat); + assert.equal(uat['url'], undefined, 'UAT has no url attribute'); + }); + + it('returns summary with correct counts', () => { + writeTestProject(tmpDir, BASIC_TEST_PROJECT); + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const summary = parseText(result)['summary'] as Record; + assert.equal(summary['connection_count'], 4); + assert.equal(summary['environment_count'], 2); + }); + + it('returns empty arrays for project with no connections or environments', () => { + writeTestProject(tmpDir, EMPTY_TEST_PROJECT); + const result = server.call('provar.connection.list', { 
project_path: tmpDir }); + assert.equal(isError(result), false); + const body = parseText(result); + assert.deepEqual(body['connections'], []); + assert.deepEqual(body['environments'], []); + }); + }); + + describe('error cases', () => { + it('returns CONNECTION_FILE_NOT_FOUND when .testproject is missing', () => { + const result = server.call('provar.connection.list', { project_path: tmpDir }); + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'CONNECTION_FILE_NOT_FOUND'); + }); + + it('CONNECTION_FILE_NOT_FOUND includes a suggestion', () => { + const result = server.call('provar.connection.list', { project_path: tmpDir }); + const body = parseText(result); + const details = body['details'] as Record; + assert.ok(details?.['suggestion'], 'Expected suggestion in details'); + }); + + it('returns PATH_NOT_ALLOWED when project_path is outside allowed paths', () => { + const strictServer = new MockMcpServer(); + registerConnectionList(strictServer as never, { allowedPaths: [tmpDir] }); + const result = strictServer.call('provar.connection.list', { + project_path: path.join(os.tmpdir(), 'some-other-project'), + }); + assert.equal(isError(result), true); + const code = parseText(result)['error_code'] as string; + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', `Unexpected: ${code}`); + }); + + it('returns CONNECTION_XML_PARSE_ERROR for malformed .testproject XML', () => { + writeTestProject(tmpDir, ' { package_name: 'pageobjects', page_type: 'standard', fields: [ - { name: 'accountName', locator_strategy: 'xpath', locator_value: "//input[@name='accountName']", element_type: 'TextType' }, - { name: 'saveButton', locator_strategy: 'css', locator_value: "[data-testid='save']", element_type: 'ButtonType' }, + { + name: 'accountName', + locator_strategy: 'xpath', + locator_value: "//input[@name='accountName']", + element_type: 'TextType', + }, + { + name: 'saveButton', + locator_strategy: 'css', + 
locator_value: "[data-testid='save']", + element_type: 'ButtonType', + }, ], dry_run: true, overwrite: false, @@ -362,4 +372,148 @@ describe('provar.pageobject.generate', () => { assert.equal(parseText(result)['idempotency_key'], undefined); }); }); + + describe('sso_class — ILoginPage stub generation', () => { + it('returns sso_stub_source when sso_class is provided', () => { + const result = server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + dry_run: true, + overwrite: false, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + const ssoSource = body['sso_stub_source'] as string; + assert.ok(typeof ssoSource === 'string' && ssoSource.length > 0, 'Expected sso_stub_source'); + assert.ok(ssoSource.includes('implements ILoginPage'), 'Stub must implement ILoginPage'); + assert.ok(ssoSource.includes('class LoginPageSso'), 'Stub class name must match sso_class'); + }); + + it('sso stub includes loginAs and logout method stubs', () => { + const result = server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + dry_run: true, + overwrite: false, + }); + + const ssoSource = parseText(result)['sso_stub_source'] as string; + assert.ok(ssoSource.includes('loginAs'), 'Expected loginAs method'); + assert.ok(ssoSource.includes('logout'), 'Expected logout method'); + }); + + it('uses the correct package in sso stub', () => { + const result = server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects.auth', + page_type: 'standard', + fields: [], + sso_class: 'AuthSso', + dry_run: true, + overwrite: false, + }); + + const ssoSource = parseText(result)['sso_stub_source'] as string; + assert.ok(ssoSource.includes('package pageobjects.auth;'), 'Expected correct package'); + }); + + 
it('does not include sso fields when sso_class is omitted', () => { + const result = server.call('provar.pageobject.generate', { + class_name: 'AccountPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + dry_run: true, + overwrite: false, + }); + + const body = parseText(result); + assert.ok(!('sso_stub_source' in body), 'sso_stub_source should be absent when no sso_class'); + }); + + it('writes both page object and SSO stub to disk when dry_run=false', () => { + const poPath = path.join(tmpDir, 'LoginPage.java'); + const result = server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + output_path: poPath, + dry_run: false, + overwrite: false, + }); + + assert.equal(isError(result), false); + assert.equal(fs.existsSync(poPath), true, 'Page object file should be written'); + const ssoPath = path.join(tmpDir, 'LoginPageSso.java'); + assert.equal(fs.existsSync(ssoPath), true, 'SSO stub file should be written'); + }); + + it('validates path policy on SSO stub path', () => { + const strictServer = new MockMcpServer(); + registerPageObjectGenerate(strictServer as never, { allowedPaths: [tmpDir] }); + + const result = strictServer.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + output_path: path.join(os.tmpdir(), 'some-other-dir', 'LoginPage.java'), + dry_run: false, + overwrite: false, + }); + + assert.equal(isError(result), true); + const code = parseText(result)['error_code'] as string; + assert.ok(code === 'PATH_NOT_ALLOWED' || code === 'PATH_TRAVERSAL', `Unexpected: ${code}`); + }); + + it('returns FILE_EXISTS when SSO stub already exists and overwrite=false', () => { + const poPath = path.join(tmpDir, 'LoginPage.java'); + const ssoPath = path.join(tmpDir, 'LoginPageSso.java'); + fs.writeFileSync(ssoPath, '// 
existing stub', 'utf-8'); + + const result = server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + output_path: poPath, + dry_run: false, + overwrite: false, + }); + + assert.equal(isError(result), true); + assert.equal(parseText(result)['error_code'], 'FILE_EXISTS'); + }); + + it('does not write main file when SSO stub preflight fails (atomic write)', () => { + const poPath = path.join(tmpDir, 'LoginPage.java'); + const ssoPath = path.join(tmpDir, 'LoginPageSso.java'); + fs.writeFileSync(ssoPath, '// existing stub', 'utf-8'); + + server.call('provar.pageobject.generate', { + class_name: 'LoginPage', + package_name: 'pageobjects', + page_type: 'standard', + fields: [], + sso_class: 'LoginPageSso', + output_path: poPath, + dry_run: false, + overwrite: false, + }); + + assert.equal(fs.existsSync(poPath), false, 'Main .java should not be written when SSO preflight fails'); + }); + }); }); diff --git a/test/unit/mcp/propertiesTools.test.ts b/test/unit/mcp/propertiesTools.test.ts index 9400970..0ba6a52 100644 --- a/test/unit/mcp/propertiesTools.test.ts +++ b/test/unit/mcp/propertiesTools.test.ts @@ -16,6 +16,7 @@ import { registerPropertiesRead, registerPropertiesSet, registerPropertiesValidate, + setSfConfigDirForTesting, } from '../../../src/mcp/tools/propertiesTools.js'; import type { ServerConfig } from '../../../src/mcp/server.js'; @@ -219,13 +220,81 @@ describe('provar.properties.read', () => { assert.equal(content['projectPath'], '/proj'); }); - it('returns FILE_NOT_FOUND when file does not exist', () => { + it('returns PROPERTIES_FILE_NOT_FOUND when file does not exist', () => { const result = server.call('provar.properties.read', { file_path: path.join(tmpDir, 'missing.json'), }); assert.equal(isError(result), true); - assert.equal(parseText(result)['error_code'], 'FILE_NOT_FOUND'); + const body = parseText(result); + 
assert.equal(body['error_code'], 'PROPERTIES_FILE_NOT_FOUND'); + assert.ok((body['message'] as string).includes('provar.properties.generate'), 'suggestion should mention generate'); + }); + + it('surfaces divergence warning when active sf config points to a different file with different values', () => { + // "disk" file — the one we're about to read + const diskFile = path.join(tmpDir, 'props-disk.json'); + fs.writeFileSync(diskFile, JSON.stringify({ provarHome: '/disk/provar', projectPath: '/disk/proj' }), 'utf-8'); + + // "active" file — what config.load registered + const activeFile = path.join(tmpDir, 'props-active.json'); + fs.writeFileSync( + activeFile, + JSON.stringify({ provarHome: '/active/provar', projectPath: '/active/proj' }), + 'utf-8' + ); + + // Write a temp sf config pointing to the active file + const sfDir = path.join(tmpDir, '.sf'); + fs.mkdirSync(sfDir, { recursive: true }); + fs.writeFileSync( + path.join(sfDir, 'config.json'), + JSON.stringify({ PROVARDX_PROPERTIES_FILE_PATH: activeFile }), + 'utf-8' + ); + + setSfConfigDirForTesting(sfDir); + try { + const result = server.call('provar.properties.read', { file_path: diskFile }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.ok(body['details'], 'details should be present'); + const details = body['details'] as Record; + assert.ok(typeof details['warning'] === 'string', 'details.warning should be a string'); + assert.ok( + typeof details['warning'] === 'string' && details['warning'].includes('provarHome'), + 'warning should mention the divergent key' + ); + } finally { + setSfConfigDirForTesting(null); + } + }); + + it('does not surface divergence warning when reading the same file that is active', () => { + const filePath = path.join(tmpDir, 'props.json'); + const props = { provarHome: '/opt/provar', projectPath: '/proj' }; + fs.writeFileSync(filePath, JSON.stringify(props), 'utf-8'); + + // Active file points to the same file + const sfDir = 
path.join(tmpDir, '.sf'); + fs.mkdirSync(sfDir, { recursive: true }); + fs.writeFileSync( + path.join(sfDir, 'config.json'), + JSON.stringify({ PROVARDX_PROPERTIES_FILE_PATH: filePath }), + 'utf-8' + ); + + setSfConfigDirForTesting(sfDir); + try { + const result = server.call('provar.properties.read', { file_path: filePath }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.ok(!body['details'], 'details should be absent when no divergence'); + } finally { + setSfConfigDirForTesting(null); + } }); it('returns MALFORMED_JSON when file contains invalid JSON', () => { @@ -260,7 +329,12 @@ describe('provar.properties.set', () => { const initial = { provarHome: '/opt/provar', projectPath: '/proj', - environment: { webBrowser: 'Chrome', webBrowserConfig: 'default', webBrowserProviderName: 'Desktop', webBrowserDeviceName: 'HD' }, + environment: { + webBrowser: 'Chrome', + webBrowserConfig: 'default', + webBrowserProviderName: 'Desktop', + webBrowserDeviceName: 'HD', + }, }; fs.writeFileSync(filePath, JSON.stringify(initial, null, 2), 'utf-8'); @@ -316,14 +390,14 @@ describe('provar.properties.set', () => { assert.deepEqual(written['testCase'], ['new/test2.testcase']); }); - it('returns FILE_NOT_FOUND when file does not exist', () => { + it('returns PROPERTIES_FILE_NOT_FOUND when file does not exist', () => { const result = server.call('provar.properties.set', { file_path: path.join(tmpDir, 'ghost.json'), updates: { provarHome: '/x' }, }); assert.equal(isError(result), true); - assert.equal(parseText(result)['error_code'], 'FILE_NOT_FOUND'); + assert.equal(parseText(result)['error_code'], 'PROPERTIES_FILE_NOT_FOUND'); }); it('returns PATH_NOT_ALLOWED for path outside allowedPaths', () => { @@ -364,7 +438,10 @@ describe('provar.properties.validate', () => { const body = parseText(result); assert.equal(body['is_valid'], false); const errors = body['errors'] as Array<{ field: string }>; - assert.ok(errors.some((e) => e.field === 
'provarHome'), 'Expected error for provarHome'); + assert.ok( + errors.some((e) => e.field === 'provarHome'), + 'Expected error for provarHome' + ); }); it('reports warning for placeholder value ${PROVAR_HOME}', () => { @@ -407,13 +484,13 @@ describe('provar.properties.validate', () => { assert.equal(parseText(result)['error_code'], 'MISSING_INPUT'); }); - it('returns FILE_NOT_FOUND when file_path points to a missing file', () => { + it('returns PROPERTIES_FILE_NOT_FOUND when file_path points to a missing file', () => { const result = server.call('provar.properties.validate', { file_path: path.join(tmpDir, 'nope.json'), }); assert.equal(isError(result), true); - assert.equal(parseText(result)['error_code'], 'FILE_NOT_FOUND'); + assert.equal(parseText(result)['error_code'], 'PROPERTIES_FILE_NOT_FOUND'); }); it('returns is_valid=false with root-level error for malformed JSON content', () => { @@ -434,7 +511,10 @@ describe('provar.properties.validate', () => { const body = parseText(result); assert.equal(body['is_valid'], false); const errors = body['errors'] as Array<{ field: string }>; - assert.ok(errors.some((e) => e.field === 'environment.webBrowser'), 'Expected error on environment.webBrowser'); + assert.ok( + errors.some((e) => e.field === 'environment.webBrowser'), + 'Expected error on environment.webBrowser' + ); }); it('flags invalid metadataLevel enum value', () => { @@ -446,6 +526,9 @@ describe('provar.properties.validate', () => { const body = parseText(result); assert.equal(body['is_valid'], false); const errors = body['errors'] as Array<{ field: string }>; - assert.ok(errors.some((e) => e.field === 'metadata.metadataLevel'), 'Expected error on metadata.metadataLevel'); + assert.ok( + errors.some((e) => e.field === 'metadata.metadataLevel'), + 'Expected error on metadata.metadataLevel' + ); }); }); diff --git a/test/unit/mcp/qualityHubTools.test.ts b/test/unit/mcp/qualityHubTools.test.ts index 81cf1a0..d80434c 100644 --- 
a/test/unit/mcp/qualityHubTools.test.ts +++ b/test/unit/mcp/qualityHubTools.test.ts @@ -31,11 +31,23 @@ class MockMcpServer { // ── Helpers ─────────────────────────────────────────────────────────────────── -function makeSpawnResult(stdout: string, stderr: string, status: number): { stdout: string; stderr: string; status: number; error: undefined; pid: number; output: never[]; signal: null } { +function makeSpawnResult( + stdout: string, + stderr: string, + status: number +): { stdout: string; stderr: string; status: number; error: undefined; pid: number; output: never[]; signal: null } { return { stdout, stderr, status, error: undefined, pid: 1, output: [], signal: null }; } -function makeEnoentResult(): { stdout: string; stderr: string; status: null; error: Error & { code: string }; pid: undefined; output: never[]; signal: null } { +function makeEnoentResult(): { + stdout: string; + stderr: string; + status: null; + error: Error & { code: string }; + pid: undefined; + output: never[]; + signal: null; +} { const err = Object.assign(new Error('spawn sf ENOENT'), { code: 'ENOENT' }); return { stdout: '', stderr: '', status: null, error: err, pid: undefined, output: [], signal: null }; } @@ -167,6 +179,40 @@ describe('qualityHubTools', () => { const result = server.call('provar.qualityhub.testrun', { target_org: 'myorg', flags: [] }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); + + it('adds wildcard warning when flags contain * glob pattern', () => { + spawnStub.returns(makeSpawnResult('run started', '', 0)); + const result = server.call('provar.qualityhub.testrun', { + target_org: 'myorg', + flags: ['--plan-name', 'Suite/E2E*'], + }); + assert.equal(isError(result), false, 'wildcard should not block execution'); + const body = parseBody(result); + const details = body.details as Record | undefined; + assert.ok(details?.warning, 'Expected warning in details'); + assert.ok((details.warning as string).includes('Wildcard'), 'Warning should mention 
wildcard'); + }); + + it('adds wildcard warning when flags contain ? pattern', () => { + spawnStub.returns(makeSpawnResult('run started', '', 0)); + const result = server.call('provar.qualityhub.testrun', { + target_org: 'myorg', + flags: ['--plan-name', 'Suite?Test'], + }); + const body = parseBody(result); + const details = body.details as Record | undefined; + assert.ok(details?.warning, 'Expected warning for ? wildcard'); + }); + + it('does not add warning for exact plan name flags', () => { + spawnStub.returns(makeSpawnResult('run started', '', 0)); + const result = server.call('provar.qualityhub.testrun', { + target_org: 'myorg', + flags: ['--plan-name', 'SmokeTests'], + }); + const body = parseBody(result); + assert.ok(!body.details, 'No details warning for exact plan name'); + }); }); // ── provar.qualityhub.testrun.report ───────────────────────────────────────── @@ -182,58 +228,66 @@ describe('qualityHubTools', () => { it('returns QH_REPORT_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'not found', 1)); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'QH_REPORT_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); describe('failure detection', () => { it('sets suggestion when JSON result.status is "FAILED"', () => { - spawnStub.returns(makeSpawnResult( - JSON.stringify({ result: { status: 'FAILED' } }), - '', 
- 0 - )); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'FAILED' } }), '', 0)); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); assert.ok(typeof body.suggestion === 'string' && body.suggestion.length > 0, 'Expected suggestion when FAILED'); }); it('sets suggestion when JSON result.status is "FAIL"', () => { - spawnStub.returns(makeSpawnResult( - JSON.stringify({ result: { status: 'FAIL' } }), - '', - 0 - )); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'FAIL' } }), '', 0)); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); assert.ok(typeof body.suggestion === 'string' && body.suggestion.length > 0, 'Expected suggestion when FAIL'); }); it('does NOT set suggestion when status is "RUNNING"', () => { - spawnStub.returns(makeSpawnResult( - JSON.stringify({ result: { status: 'RUNNING' } }), - '', - 0 - )); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'RUNNING' } }), '', 0)); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); assert.ok(!body.suggestion, 'Expected no suggestion for non-failure status'); }); it('does NOT set suggestion when status is "PASSED"', () => { - spawnStub.returns(makeSpawnResult( - JSON.stringify({ result: { status: 'PASSED' } }), - '', - 0 - )); - const result = 
server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'PASSED' } }), '', 0)); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); assert.ok(!body.suggestion, 'Expected no suggestion when PASSED'); }); @@ -241,26 +295,31 @@ describe('qualityHubTools', () => { it('does NOT false-positive on "failure" in plain text output (word in non-status context)', () => { // Before PR #110 the check was /fail/i which would match "failure" anywhere in output; // now it only matches the "status" field value. - spawnStub.returns(makeSpawnResult( - '{"message": "No failure detected in this output", "result": {"status": "PASSED"}}', - '', - 0 - )); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns( + makeSpawnResult('{"message": "No failure detected in this output", "result": {"status": "PASSED"}}', '', 0) + ); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); assert.ok(!body.suggestion, 'Expected no suggestion — "failure" in message should not trigger detection'); }); it('falls back to regex extraction when stdout is not valid JSON', () => { // Non-JSON output with "status": "FAILED" substring - spawnStub.returns(makeSpawnResult( - '"status": "FAILED"', - '', - 0 - )); - const result = server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + spawnStub.returns(makeSpawnResult('"status": "FAILED"', '', 0)); + const result = server.call('provar.qualityhub.testrun.report', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); const body = parseBody(result); - assert.ok(typeof body.suggestion === 
'string' && body.suggestion.length > 0, 'Expected suggestion from regex fallback'); + assert.ok( + typeof body.suggestion === 'string' && body.suggestion.length > 0, + 'Expected suggestion from regex fallback' + ); }); }); }); @@ -278,14 +337,22 @@ describe('qualityHubTools', () => { it('returns QH_ABORT_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'abort failed', 1)); - const result = server.call('provar.qualityhub.testrun.abort', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + const result = server.call('provar.qualityhub.testrun.abort', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'QH_ABORT_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testrun.abort', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + const result = server.call('provar.qualityhub.testrun.abort', { + target_org: 'myorg', + run_id: 'abc-123', + flags: [], + }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); }); @@ -295,7 +362,10 @@ describe('qualityHubTools', () => { describe('provar.qualityhub.testcase.retrieve', () => { it('calls sf with testcase retrieve args', () => { spawnStub.returns(makeSpawnResult('[]', '', 0)); - const result = server.call('provar.qualityhub.testcase.retrieve', { target_org: 'myorg', flags: ['--user-story', 'US-1'] }); + const result = server.call('provar.qualityhub.testcase.retrieve', { + target_org: 'myorg', + flags: ['--user-story', 'US-1'], + }); const body = parseBody(result); assert.equal(body.exitCode, 0); const args = spawnStub.firstCall.args[1] as string[]; diff --git a/test/unit/mcp/rcaTools.test.ts b/test/unit/mcp/rcaTools.test.ts index e419c0c..4f24e29 100644 --- a/test/unit/mcp/rcaTools.test.ts +++ b/test/unit/mcp/rcaTools.test.ts @@ -80,11 +80,13 @@ const UNKNOWN_JUNIT_XML = ` let tmpDir: string; let server: 
MockMcpServer; +const UNRESTRICTED_CONFIG = { allowedPaths: [] }; + beforeEach(() => { tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'rcatools-test-')); server = new MockMcpServer(); registerTestRunLocate(server as never); - registerTestRunRca(server as never); + registerTestRunRca(server as never, UNRESTRICTED_CONFIG); }); afterEach(() => { @@ -610,4 +612,104 @@ describe('provar.testrun.rca', () => { 'Salesforce categories should not appear in infrastructure_issues' ); }); + + it('mode=rca is the default and produces existing RCA output shape', () => { + const resultsDir = makeResultsDir(path.join(tmpDir, 'rca-default'), JUNIT_XML); + const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + // Full RCA shape must be present + assert.ok('run_summary' in body, 'run_summary should be present for mode=rca'); + assert.ok('failures' in body, 'failures should be present'); + assert.ok('recommendations' in body, 'recommendations should be present'); + assert.ok('infrastructure_issues' in body, 'infrastructure_issues should be present'); + }); +}); + +// ── provar.testrun.rca mode=failures ────────────────────────────────────────── + +describe('provar.testrun.rca mode=failures', () => { + it('returns lightweight failure array when JUnit.xml is present', () => { + const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-mode'), JUNIT_XML); + const body = parseText( + server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + ); + + assert.ok('failures' in body, 'failures should be present'); + const failures = body['failures'] as Array>; + assert.equal(failures.length, 1, 'JUNIT_XML has exactly 1 failing test case'); + + const f = failures[0]; + assert.ok('testItemId' in f, 'testItemId should be present'); + assert.ok('title' in f, 'title should be present'); + assert.ok('errorMessage' in f, 'errorMessage should be present'); + assert.equal(f['testItemId'], 
'SearchTest.testcase'); + assert.ok(typeof f['errorMessage'] === 'string' && f['errorMessage'].length > 0); + + // Should NOT contain full RCA fields + assert.ok(!('run_summary' in body), 'mode=failures should not include run_summary'); + assert.ok(!('infrastructure_issues' in body), 'mode=failures should not include infrastructure_issues'); + assert.ok(!('recommendations' in body), 'mode=failures should not include recommendations'); + }); + + it('returns empty array with warning when results dir has no JUnit.xml', () => { + const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-empty')); + const body = parseText( + server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + ); + + const failures = body['failures'] as unknown[]; + assert.equal(failures.length, 0, 'should return empty array'); + + const details = body['details'] as Record; + assert.ok(typeof details?.['warning'] === 'string', 'details.warning should be set'); + }); + + it('skipped tests are not included in failures list', () => { + const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-skip'), JUNIT_XML); + const body = parseText( + server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + ); + const failures = body['failures'] as Array>; + const testItemIds = failures.map((f) => f['testItemId']); + assert.ok(!testItemIds.includes('DataTest.testcase'), 'skipped test should not appear in failures'); + }); + + it('rejects explicit results_path outside allowed paths', () => { + const restrictedServer = new MockMcpServer(); + const allowedDir = path.join(tmpDir, 'allowed'); + fs.mkdirSync(allowedDir, { recursive: true }); + registerTestRunRca(restrictedServer as never, { allowedPaths: [allowedDir] }); + + const outsideDir = makeResultsDir(path.join(tmpDir, 'outside'), JUNIT_XML); + const result = restrictedServer.call('provar.testrun.rca', { + project_path: tmpDir, + results_path: outsideDir, + 
mode: 'failures', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.ok( + body['error_code'] === 'PATH_NOT_ALLOWED' || body['error_code'] === 'PATH_TRAVERSAL', + `expected path policy error, got: ${String(body['error_code'])}` + ); + }); + + it('rejects project_path outside allowed paths', () => { + const restrictedServer = new MockMcpServer(); + const allowedDir = path.join(tmpDir, 'allowed'); + fs.mkdirSync(allowedDir, { recursive: true }); + registerTestRunRca(restrictedServer as never, { allowedPaths: [allowedDir] }); + + const result = restrictedServer.call('provar.testrun.rca', { + project_path: tmpDir, // tmpDir root is outside allowed subdir + mode: 'failures', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.ok( + body['error_code'] === 'PATH_NOT_ALLOWED' || body['error_code'] === 'PATH_TRAVERSAL', + `expected path policy error for project_path, got: ${String(body['error_code'])}` + ); + }); }); diff --git a/test/unit/mcp/testCaseGenerate.test.ts b/test/unit/mcp/testCaseGenerate.test.ts index cd24444..bbdcf1b 100644 --- a/test/unit/mcp/testCaseGenerate.test.ts +++ b/test/unit/mcp/testCaseGenerate.test.ts @@ -392,4 +392,101 @@ describe('provar.testcase.generate', () => { assert.ok(!xml.includes('valueClass="String"'), 'Must not emit uppercase valueClass="String"'); }); }); + + describe('target_uri — non-SF page object (ui:) nesting', () => { + it('wraps steps in UiWithScreen when target_uri uses ?pageId= format', () => { + const result = server.call('provar.testcase.generate', { + test_case_name: 'Non-SF Login', + steps: [{ api_id: 'UiDoAction', name: 'Enter username', attributes: { field: 'username' } }], + target_uri: 'ui:pageobject:target?pageId=pageobjects.LoginPage', + dry_run: true, + overwrite: false, + validate_after_edit: true, + }); + + assert.equal(isError(result), false); + const xml = parseText(result)['xml_content'] as string; + 
assert.ok(xml.includes('UiWithScreen'), 'Expected UiWithScreen wrapper'); + assert.ok(xml.includes('testItemId="1"'), 'UiWithScreen should be testItemId=1'); + assert.ok(xml.includes('ui:pageobject:target?pageId=pageobjects.LoginPage'), 'Expected target URI in XML'); + assert.ok(xml.includes(' { + const result = server.call('provar.testcase.generate', { + test_case_name: 'Non-SF Multi', + steps: [ + { api_id: 'UiDoAction', name: 'Step A', attributes: {} }, + { api_id: 'UiDoAction', name: 'Step B', attributes: {} }, + ], + target_uri: 'ui:pageobject:target?pageId=pageobjects.LoginPage', + dry_run: true, + overwrite: false, + validate_after_edit: true, + }); + + const xml = parseText(result)['xml_content'] as string; + assert.ok(xml.includes('testItemId="2"'), 'substeps clause should be testItemId=2'); + assert.ok(xml.includes('testItemId="3"'), 'First inner step should be testItemId=3'); + assert.ok(xml.includes('testItemId="4"'), 'Second inner step should be testItemId=4'); + }); + + it('uses flat structure when target_uri starts with sf:', () => { + const result = server.call('provar.testcase.generate', { + test_case_name: 'SF Target', + steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], + target_uri: 'sf:ui:target:Salesforce__Standard__Account', + dry_run: true, + overwrite: false, + validate_after_edit: true, + }); + + const xml = parseText(result)['xml_content'] as string; + assert.ok(!xml.includes('UiWithScreen'), 'Should not wrap in UiWithScreen for sf: target'); + assert.ok(xml.includes('testItemId="1"'), 'Step should be testItemId=1 directly'); + }); + + it('uses flat structure when target_uri is omitted', () => { + const result = server.call('provar.testcase.generate', { + test_case_name: 'No URI', + steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], + dry_run: true, + overwrite: false, + validate_after_edit: true, + }); + + const xml = parseText(result)['xml_content'] as string; + assert.ok(!xml.includes('UiWithScreen'), 
'No UiWithScreen without target_uri'); + }); + }); + + describe('validate_after_edit', () => { + it('includes validation field when validate_after_edit=true (default)', () => { + const result = server.call('provar.testcase.generate', { + test_case_name: 'Validated', + steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], + dry_run: true, + overwrite: false, + validate_after_edit: true, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.ok(body['validation'], 'Expected validation field'); + }); + + it('omits validation field when validate_after_edit=false', () => { + const result = server.call('provar.testcase.generate', { + test_case_name: 'Skip Validation', + steps: [], + dry_run: true, + overwrite: false, + validate_after_edit: false, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.ok(!('validation' in body), 'validation field should be absent when validate_after_edit=false'); + }); + }); }); diff --git a/test/unit/mcp/testCaseStepTools.test.ts b/test/unit/mcp/testCaseStepTools.test.ts new file mode 100644 index 0000000..af46cfa --- /dev/null +++ b/test/unit/mcp/testCaseStepTools.test.ts @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2024 Provar Limited. + * All rights reserved. + * Licensed under the BSD 3-Clause license. 
+ * For full license text, see LICENSE.md file in the repo root or https://opensource.org/licenses/BSD-3-Clause + */ + +/* eslint-disable camelcase */ +import { strict as assert } from 'node:assert'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { describe, it, beforeEach, afterEach } from 'mocha'; +import { registerAllTestCaseStepTools } from '../../../src/mcp/tools/testCaseStepTools.js'; + +// ── Mock MCP server ──────────────────────────────────────────────────────────── + +type ToolHandler = (args: Record) => unknown; + +class MockMcpServer { + private handlers = new Map(); + + public tool(name: string, _desc: string, _schema: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + + public call(name: string, args: Record): ReturnType { + const h = this.handlers.get(name); + if (!h) throw new Error(`Tool not registered: ${name}`); + return h(args); + } +} + +// ── Helpers ──────────────────────────────────────────────────────────────────── + +function parseText(result: unknown): Record { + const r = result as { content: Array<{ type: string; text: string }> }; + return JSON.parse(r.content[0].text) as Record; +} + +function isError(result: unknown): boolean { + return (result as { isError?: boolean }).isError === true; +} + +// ── Fixture XML ──────────────────────────────────────────────────────────────── + +const VALID_TESTCASE_XML = ` + + + + + + +`; + +const NEW_STEP_XML = + ''; + +// ── Test setup ───────────────────────────────────────────────────────────────── + +let tmpDir: string; +let server: MockMcpServer; +let tcPath: string; + +beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'steptools-test-')); + server = new MockMcpServer(); + registerAllTestCaseStepTools(server as never, { allowedPaths: [] }); + tcPath = path.join(tmpDir, 'SmokeTest.testcase'); + fs.writeFileSync(tcPath, VALID_TESTCASE_XML, 'utf-8'); +}); + +afterEach(() => { + fs.rmSync(tmpDir, { 
recursive: true, force: true }); +}); + +// ── provar.testcase.step.edit ────────────────────────────────────────────────── + +describe('provar.testcase.step.edit', () => { + // remove happy path + it('mode=remove removes the target step and leaves file valid', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'remove', + test_item_id: '2', + validate_after_edit: false, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.equal(body['success'], true); + assert.equal(body['mode'], 'remove'); + assert.equal(body['test_item_id'], '2'); + + const written = fs.readFileSync(tcPath, 'utf-8'); + assert.ok(!written.includes('testItemId="2"'), 'step 2 should be removed'); + assert.ok(written.includes('testItemId="1"'), 'step 1 should remain'); + assert.ok(written.includes('testItemId="3"'), 'step 3 should remain'); + assert.ok(!fs.existsSync(tcPath + '.bak'), 'backup file should be deleted on success'); + }); + + // add happy path — after anchor + it('mode=add inserts new step after anchor by default', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '1', + step_xml: NEW_STEP_XML, + validate_after_edit: false, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.equal(body['success'], true); + assert.equal(body['mode'], 'add'); + + const written = fs.readFileSync(tcPath, 'utf-8'); + assert.ok(written.includes('testItemId="99"'), 'new step should be present'); + // Verify order: step 1 before step 99 + const pos1 = written.indexOf('testItemId="1"'); + const pos99 = written.indexOf('testItemId="99"'); + const pos2 = written.indexOf('testItemId="2"'); + assert.ok(pos1 < pos99, 'anchor step 1 should appear before new step 99'); + assert.ok(pos99 < pos2, 'new step 99 should appear before step 2'); + assert.ok(!fs.existsSync(tcPath + '.bak'), 'backup file should be deleted on 
success'); + }); + + // add before anchor + it('mode=add with position=before inserts before anchor', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '2', + position: 'before', + step_xml: NEW_STEP_XML, + validate_after_edit: false, + }); + + assert.equal(isError(result), false); + const written = fs.readFileSync(tcPath, 'utf-8'); + const pos99 = written.indexOf('testItemId="99"'); + const pos2 = written.indexOf('testItemId="2"'); + assert.ok(pos99 < pos2, 'new step should appear before anchor step 2'); + }); + + // STEP_NOT_FOUND — remove + it('mode=remove returns STEP_NOT_FOUND with all IDs when testItemId missing', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'remove', + test_item_id: '999', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'STEP_NOT_FOUND'); + const details = body['details'] as Record; + const allIds = details['all_test_item_ids'] as string[]; + assert.ok(Array.isArray(allIds), 'details.all_test_item_ids should be an array'); + assert.ok(allIds.includes('1'), 'should list testItemId 1'); + assert.ok(allIds.includes('2'), 'should list testItemId 2'); + assert.ok(allIds.includes('3'), 'should list testItemId 3'); + // File should be unchanged + const written = fs.readFileSync(tcPath, 'utf-8'); + assert.equal(written, VALID_TESTCASE_XML); + }); + + // STEP_NOT_FOUND — add + it('mode=add returns STEP_NOT_FOUND with all IDs when anchor missing', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '999', + step_xml: NEW_STEP_XML, + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'STEP_NOT_FOUND'); + }); + + // INVALID_STEP_XML — step_xml contains no element + it('mode=add returns INVALID_STEP_XML when step_xml 
contains no element', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '1', + step_xml: '', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'INVALID_STEP_XML'); + // File must be untouched (no backup written) + assert.ok(!fs.existsSync(tcPath + '.bak'), 'no backup should be written for pre-mutation errors'); + const written = fs.readFileSync(tcPath, 'utf-8'); + assert.equal(written, VALID_TESTCASE_XML); + }); + + // INVALID_STEP_XML — step_xml contains multiple elements + it('mode=add returns INVALID_STEP_XML when step_xml contains multiple elements', () => { + const multiStep = + '' + + ''; + + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '1', + step_xml: multiStep, + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'INVALID_STEP_XML'); + assert.ok(!fs.existsSync(tcPath + '.bak'), 'no backup should be written for pre-mutation errors'); + }); + + // mode=add with missing step_xml + it('mode=add returns MISSING_INPUT when step_xml is absent', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '1', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'MISSING_INPUT'); + }); + + // Backup restored when validation fails after mutation + it('restores backup and returns INVALID_XML_AFTER_EDIT when mutated file fails validation', () => { + // Write a test case where removing the only step will leave empty, + // but the XML itself is still structurally valid. We need a case where + // validate_after_edit=true fires and the result is invalid. 
+ // The simplest trigger: create a test case where removing all 3 steps produces + // an empty element — validateTestCase passes this since it only checks presence. + // Instead, we test with an intentionally broken step_xml that results in an invalid + // testCase XML (step with non-UUID guid). + const brokenStepXml = + ''; + + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + mode: 'add', + test_item_id: '1', + step_xml: brokenStepXml, + validate_after_edit: true, + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'INVALID_XML_AFTER_EDIT'); + + // Original file should be restored + const written = fs.readFileSync(tcPath, 'utf-8'); + assert.ok(!written.includes('testItemId="99"'), 'broken step should not be in restored file'); + assert.ok(!fs.existsSync(tcPath + '.bak'), 'backup should be deleted after restore'); + }); + + // Path policy: path outside allowed paths is rejected + it('rejects test_case_path outside allowed paths', () => { + const restrictedServer = new MockMcpServer(); + registerAllTestCaseStepTools(restrictedServer as never, { allowedPaths: [path.join(tmpDir, 'allowed')] }); + fs.mkdirSync(path.join(tmpDir, 'allowed'), { recursive: true }); + + const result = restrictedServer.call('provar.testcase.step.edit', { + test_case_path: tcPath, // tcPath is in tmpDir root, not in allowed subdir + mode: 'remove', + test_item_id: '1', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.ok( + body['error_code'] === 'PATH_NOT_ALLOWED' || body['error_code'] === 'PATH_TRAVERSAL', + `expected path policy error, got: ${String(body['error_code'])}` + ); + }); + + // validate_after_edit=true with valid result includes validation in response + it('returns validation result in response when validate_after_edit=true and edit is valid', () => { + const result = server.call('provar.testcase.step.edit', { + test_case_path: tcPath, + 
mode: 'remove', + test_item_id: '2', + validate_after_edit: true, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.equal(body['success'], true); + // The fixture XML has a valid structure so validation should pass + const validation = body['validation'] as Record | undefined; + assert.ok(validation !== undefined, 'validation field should be present'); + }); + + // FILE_NOT_FOUND + it('returns FILE_NOT_FOUND when test case does not exist', () => { + const missing = path.join(tmpDir, 'nonexistent.testcase'); + const result = server.call('provar.testcase.step.edit', { + test_case_path: missing, + mode: 'remove', + test_item_id: '1', + }); + + assert.equal(isError(result), true); + const body = parseText(result); + assert.equal(body['error_code'], 'FILE_NOT_FOUND'); + }); +}); diff --git a/test/unit/mcp/testCaseValidate.test.ts b/test/unit/mcp/testCaseValidate.test.ts index 1b40edc..628ce59 100644 --- a/test/unit/mcp/testCaseValidate.test.ts +++ b/test/unit/mcp/testCaseValidate.test.ts @@ -6,7 +6,12 @@ import path from 'node:path'; import { describe, it, beforeEach, afterEach } from 'mocha'; import sinon from 'sinon'; import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; -import { validateTestCase, registerTestCaseValidate } from '../../../src/mcp/tools/testCaseValidate.js'; +import { + validateTestCase, + registerTestCaseValidate, + validateTestCaseXml, +} from '../../../src/mcp/tools/testCaseValidate.js'; +import type { ServerConfig } from '../../../src/mcp/server.js'; import { qualityHubClient, QualityHubAuthError, @@ -391,3 +396,42 @@ describe('registerTestCaseValidate handler', () => { assert.ok(String(result['validation_warning']).toLowerCase().includes('rate limit')); }); }); + +// ── validateTestCaseXml ─────────────────────────────────────────────────────── + +function makeConfig(allowedPath: string): ServerConfig { + return { allowedPaths: [allowedPath] } as unknown as ServerConfig; +} + 
+describe('validateTestCaseXml', () => { + let tmpDir: string; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'tc-xml-')); + }); + + afterEach(() => { + fs.rmSync(tmpDir, { recursive: true, force: true }); + }); + + it('reads file from disk and returns validation result', () => { + const filePath = path.join(tmpDir, 'test.testcase'); + fs.writeFileSync(filePath, VALID_TC); + const result = validateTestCaseXml(filePath, makeConfig(tmpDir)); + assert.equal(result.is_valid, true); + assert.equal(result.error_count, 0); + }); + + it('throws TESTCASE_FILE_NOT_FOUND when file does not exist', () => { + const missing = path.join(tmpDir, 'missing.testcase'); + assert.throws( + () => validateTestCaseXml(missing, makeConfig(tmpDir)), + (err: Error & { code?: string }) => err.code === 'TESTCASE_FILE_NOT_FOUND' + ); + }); + + it('throws when file path is outside allowed paths', () => { + const outside = path.join(os.tmpdir(), 'outside.testcase'); + assert.throws(() => validateTestCaseXml(outside, makeConfig(tmpDir))); + }); +});