diff --git a/packages/gooddata-sdk/src/gooddata_sdk/__init__.py b/packages/gooddata-sdk/src/gooddata_sdk/__init__.py
index 77397b92d..2e636e351 100644
--- a/packages/gooddata-sdk/src/gooddata_sdk/__init__.py
+++ b/packages/gooddata-sdk/src/gooddata_sdk/__init__.py
@@ -280,6 +280,7 @@
     ExecutionDefinition,
     ExecutionResponse,
     ExecutionResult,
+    MetricDefinitionOverride,
    ResultCacheMetadata,
     ResultSizeBytesLimitExceeded,
     ResultSizeDimensions,
@@ -316,6 +317,11 @@
     PopDatesetMetric,
     SimpleMetric,
 )
+from gooddata_sdk.compute.model.what_if import (
+    AfmWhatIfMeasureAdjustmentConfig,
+    AfmWhatIfScenarioConfig,
+    AfmWhatIfScenarioItem,
+)
 from gooddata_sdk.compute.service import ComputeService
 from gooddata_sdk.sdk import GoodDataSdk
 from gooddata_sdk.table import ExecutionTable, TableService
diff --git a/packages/gooddata-sdk/src/gooddata_sdk/compute/model/execution.py b/packages/gooddata-sdk/src/gooddata_sdk/compute/model/execution.py
index d947bad60..1ee81f89c 100644
--- a/packages/gooddata-sdk/src/gooddata_sdk/compute/model/execution.py
+++ b/packages/gooddata-sdk/src/gooddata_sdk/compute/model/execution.py
@@ -29,6 +29,49 @@
 logger = logging.getLogger(__name__)
 
 
+@define(kw_only=True)
+class MetricDefinitionOverride:
+    """(EXPERIMENTAL) Override for a catalog metric definition used during execution.
+
+    Allows substituting a catalog metric's MAQL definition for a single
+    computation request without modifying the stored catalog definition.
+
+    Args:
+        item_id: ID of the catalog metric whose definition is being overridden.
+        item_type: Type of the catalog item. One of ``"attribute"``, ``"label"``,
+            ``"fact"``, or ``"metric"``.
+        maql: MAQL expression to use instead of the stored definition.
+    """
+
+    item_id: str
+    item_type: str
+    maql: str
+
+    def as_api_model(self) -> models.MetricDefinitionOverride:
+        identifier = models.AfmObjectIdentifierCoreIdentifier(
+            id=self.item_id,
+            type=self.item_type,
+            _check_type=False,
+        )
+        item = models.AfmObjectIdentifierCore(
+            identifier=identifier,
+            _check_type=False,
+        )
+        inline = models.InlineMeasureDefinitionInline(
+            maql=self.maql,
+            _check_type=False,
+        )
+        definition = models.InlineMeasureDefinition(
+            inline=inline,
+            _check_type=False,
+        )
+        return models.MetricDefinitionOverride(
+            item=item,
+            definition=definition,
+            _check_type=False,
+        )
+
+
 @define
 class TotalDimension:
     idx: int
@@ -72,6 +115,7 @@ def __init__(
         dimensions: list[TableDimension],
         totals: list[TotalDefinition] | None = None,
         is_cancellable: bool = False,
+        measure_definition_overrides: list[MetricDefinitionOverride] | None = None,
     ) -> None:
         self._attributes = attributes or []
         self._metrics = metrics or []
@@ -79,6 +123,7 @@ def __init__(
         self._dimensions = [dim for dim in dimensions if dim.item_ids is not None]
         self._totals = totals
         self._is_cancellable = is_cancellable
+        self._measure_definition_overrides = measure_definition_overrides or []
 
     @property
     def attributes(self) -> list[Attribute]:
@@ -115,6 +160,10 @@ def is_two_dim(self) -> bool:
     def is_cancellable(self) -> bool:
         return self._is_cancellable
 
+    @property
+    def measure_definition_overrides(self) -> list[MetricDefinitionOverride]:
+        return self._measure_definition_overrides
+
     def _create_value_sort_key(self, sort_key: dict) -> models.SortKey:
         sort_key_value = sort_key["value"]
         return models.SortKey(
@@ -209,7 +258,14 @@ def _create_result_spec(self) -> models.ResultSpec:
         return models.ResultSpec(dimensions=dimensions, totals=totals)
 
     def as_api_model(self) -> models.AfmExecution:
-        execution = compute_model_to_api_model(attributes=self.attributes, metrics=self.metrics, filters=self.filters)
+        execution = compute_model_to_api_model(
+            attributes=self.attributes,
+            metrics=self.metrics,
+            filters=self.filters,
+            measure_definition_overrides=self.measure_definition_overrides
+            if self.measure_definition_overrides
+            else None,
+        )
         result_spec = self._create_result_spec()
         return models.AfmExecution(execution=execution, result_spec=result_spec)
 
@@ -568,6 +624,7 @@ def compute_model_to_api_model(
     attributes: list[Attribute] | None = None,
     metrics: list[Metric] | None = None,
     filters: list[Filter] | None = None,
+    measure_definition_overrides: list[MetricDefinitionOverride] | None = None,
 ) -> models.AFM:
     """
     Transforms categorized execution model entities (attributes, metrics, facts) into an API model
@@ -576,9 +633,16 @@ def compute_model_to_api_model(
     :param attributes: optionally specify list of attributes
     :param metrics: optionally specify list of metrics
     :param filters: optionally specify list of filters
+    :param measure_definition_overrides: optionally specify metric definition overrides
     """
+    kwargs: dict[str, Any] = {}
+    if measure_definition_overrides:
+        kwargs["measure_definition_overrides"] = [o.as_api_model() for o in measure_definition_overrides]
+
     return models.AFM(
         attributes=[a.as_api_model() for a in attributes] if attributes is not None else [],
         measures=[m.as_api_model() for m in metrics] if metrics is not None else [],
         filters=[f.as_api_model() for f in filters if not f.is_noop()] if filters is not None else [],
+        _check_type=False,
+        **kwargs,
     )
diff --git a/packages/gooddata-sdk/src/gooddata_sdk/compute/model/what_if.py b/packages/gooddata-sdk/src/gooddata_sdk/compute/model/what_if.py
new file mode 100644
index 000000000..59b7cee66
--- /dev/null
+++ b/packages/gooddata-sdk/src/gooddata_sdk/compute/model/what_if.py
@@ -0,0 +1,72 @@
+# (C) 2025 GoodData Corporation
+from __future__ import annotations
+
+import gooddata_api_client.models as afm_models
+from attrs import define, field
+
+
+@define(kw_only=True)
+class AfmWhatIfMeasureAdjustmentConfig:
+    """SDK wrapper for a single measure adjustment within a what-if scenario.
+
+    Represents an alternative MAQL definition for a catalog metric or fact that
+    is used only during the current what-if computation without modifying the
+    stored definition.
+    """
+
+    metric_id: str
+    """ID of the metric or fact to adjust."""
+    metric_type: str
+    """Type of the object being adjusted. Typically 'metric' or 'fact'."""
+    scenario_maql: str
+    """Alternative MAQL expression to use for this scenario."""
+
+    def as_api_model(self) -> afm_models.WhatIfMeasureAdjustmentConfig:
+        return afm_models.WhatIfMeasureAdjustmentConfig(
+            metric_id=self.metric_id,
+            metric_type=self.metric_type,
+            scenario_maql=self.scenario_maql,
+            _check_type=False,
+        )
+
+
+@define(kw_only=True)
+class AfmWhatIfScenarioItem:
+    """SDK wrapper for a single what-if scenario.
+
+    Represents one named scenario that overrides one or more measure definitions
+    with alternative MAQL expressions.
+    """
+
+    label: str
+    """Human-readable label for the scenario."""
+    adjustments: list[AfmWhatIfMeasureAdjustmentConfig] = field(factory=list)
+    """Measure adjustments for this scenario."""
+
+    def as_api_model(self) -> afm_models.WhatIfScenarioItem:
+        return afm_models.WhatIfScenarioItem(
+            label=self.label,
+            adjustments=[a.as_api_model() for a in self.adjustments],
+            _check_type=False,
+        )
+
+
+@define(kw_only=True)
+class AfmWhatIfScenarioConfig:
+    """SDK wrapper for what-if scenario analysis configuration.
+
+    Passed as part of :class:`AfmVisualizationConfig` to trigger what-if
+    computation alongside a regular AFM execution.
+    """
+
+    include_baseline: bool
+    """Whether the unmodified (baseline) values are included in the result."""
+    scenarios: list[AfmWhatIfScenarioItem] = field(factory=list)
+    """Scenarios, each providing alternative measure calculations."""
+
+    def as_api_model(self) -> afm_models.WhatIfScenarioConfig:
+        return afm_models.WhatIfScenarioConfig(
+            include_baseline=self.include_baseline,
+            scenarios=[s.as_api_model() for s in self.scenarios],
+            _check_type=False,
+        )
diff --git a/packages/gooddata-sdk/tests/compute_model/test_metric_definition_override.py b/packages/gooddata-sdk/tests/compute_model/test_metric_definition_override.py
new file mode 100644
index 000000000..f3c4bc7d2
--- /dev/null
+++ b/packages/gooddata-sdk/tests/compute_model/test_metric_definition_override.py
@@ -0,0 +1,150 @@
+# (C) 2025 GoodData Corporation
+from __future__ import annotations
+
+import pytest
+from gooddata_sdk.compute.model.base import ObjId
+from gooddata_sdk.compute.model.execution import MetricDefinitionOverride, compute_model_to_api_model
+from gooddata_sdk.compute.model.metric import SimpleMetric
+from gooddata_sdk.compute.model.what_if import (
+    AfmWhatIfMeasureAdjustmentConfig,
+    AfmWhatIfScenarioConfig,
+    AfmWhatIfScenarioItem,
+)
+
+
+class TestMetricDefinitionOverride:
+    def test_as_api_model_produces_correct_structure(self):
+        override = MetricDefinitionOverride(
+            item_id="my.metric",
+            item_type="metric",
+            maql="SELECT SUM({fact/revenue}) WHERE {attribute/region} = 'West'",
+        )
+        api_model = override.as_api_model()
+        result = api_model.to_dict()
+
+        assert result["item"]["identifier"]["id"] == "my.metric"
+        assert result["item"]["identifier"]["type"] == "metric"
+        assert result["definition"]["inline"]["maql"] == (
+            "SELECT SUM({fact/revenue}) WHERE {attribute/region} = 'West'"
+        )
+
+    def test_as_api_model_with_fact_type(self):
+        override = MetricDefinitionOverride(
+            item_id="revenue.fact",
+            item_type="fact",
+            maql="SELECT AVG({fact/revenue})",
+        )
+        api_model = override.as_api_model()
+        result = api_model.to_dict()
+
+        assert result["item"]["identifier"]["type"] == "fact"
+
+
+class TestComputeModelToApiModelWithOverrides:
+    def test_measure_definition_overrides_forwarded(self):
+        metric = SimpleMetric(local_id="m1", item=ObjId(type="metric", id="catalog.metric"))
+        override = MetricDefinitionOverride(
+            item_id="catalog.metric",
+            item_type="metric",
+            maql="SELECT SUM({fact/cost})",
+        )
+
+        afm = compute_model_to_api_model(
+            metrics=[metric],
+            measure_definition_overrides=[override],
+        )
+        result = afm.to_dict()
+
+        assert "measureDefinitionOverrides" in result
+        overrides = result["measureDefinitionOverrides"]
+        assert len(overrides) == 1
+        assert overrides[0]["item"]["identifier"]["id"] == "catalog.metric"
+        assert overrides[0]["definition"]["inline"]["maql"] == "SELECT SUM({fact/cost})"
+
+    def test_no_overrides_omits_field(self):
+        metric = SimpleMetric(local_id="m1", item=ObjId(type="metric", id="catalog.metric"))
+        afm = compute_model_to_api_model(metrics=[metric])
+        result = afm.to_dict()
+
+        assert result.get("measureDefinitionOverrides") is None or result.get("measureDefinitionOverrides") == []
+
+
+class TestAfmWhatIfMeasureAdjustmentConfig:
+    def test_as_api_model_produces_correct_structure(self):
+        adjustment = AfmWhatIfMeasureAdjustmentConfig(
+            metric_id="revenue",
+            metric_type="metric",
+            scenario_maql="SELECT SUM({fact/revenue}) * 1.1",
+        )
+        api_model = adjustment.as_api_model()
+        result = api_model.to_dict()
+
+        assert result["metricId"] == "revenue"
+        assert result["metricType"] == "metric"
+        assert result["scenarioMaql"] == "SELECT SUM({fact/revenue}) * 1.1"
+
+
+class TestAfmWhatIfScenarioItem:
+    def test_as_api_model_with_adjustments(self):
+        adjustment = AfmWhatIfMeasureAdjustmentConfig(
+            metric_id="revenue",
+            metric_type="metric",
+            scenario_maql="SELECT SUM({fact/revenue}) * 1.1",
+        )
+        scenario = AfmWhatIfScenarioItem(
+            label="Optimistic +10%",
+            adjustments=[adjustment],
+        )
+        api_model = scenario.as_api_model()
+        result = api_model.to_dict()
+
+        assert result["label"] == "Optimistic +10%"
+        assert len(result["adjustments"]) == 1
+        assert result["adjustments"][0]["metricId"] == "revenue"
+
+    def test_as_api_model_empty_adjustments(self):
+        scenario = AfmWhatIfScenarioItem(label="Empty scenario")
+        result = scenario.as_api_model().to_dict()
+
+        assert result["label"] == "Empty scenario"
+        assert result["adjustments"] == []
+
+
+class TestAfmWhatIfScenarioConfig:
+    def test_as_api_model_with_scenarios(self):
+        adjustment = AfmWhatIfMeasureAdjustmentConfig(
+            metric_id="revenue",
+            metric_type="metric",
+            scenario_maql="SELECT SUM({fact/revenue}) * 0.9",
+        )
+        scenario = AfmWhatIfScenarioItem(label="Pessimistic -10%", adjustments=[adjustment])
+        config = AfmWhatIfScenarioConfig(include_baseline=True, scenarios=[scenario])
+
+        result = config.as_api_model().to_dict()
+
+        assert result["includeBaseline"] is True
+        assert len(result["scenarios"]) == 1
+        assert result["scenarios"][0]["label"] == "Pessimistic -10%"
+
+    def test_as_api_model_no_baseline(self):
+        config = AfmWhatIfScenarioConfig(include_baseline=False)
+        result = config.as_api_model().to_dict()
+
+        assert result["includeBaseline"] is False
+        assert result["scenarios"] == []
+
+    @pytest.mark.parametrize(
+        "include_baseline, scenario_count",
+        [
+            (True, 0),
+            (False, 1),
+            (True, 2),
+        ],
+    )
+    def test_as_api_model_parametrized(self, include_baseline: bool, scenario_count: int):
+        scenarios = [AfmWhatIfScenarioItem(label=f"scenario_{i}") for i in range(scenario_count)]
+        config = AfmWhatIfScenarioConfig(include_baseline=include_baseline, scenarios=scenarios)
+        result = config.as_api_model().to_dict()
+
+        assert result["includeBaseline"] == include_baseline
+        assert len(result["scenarios"]) == scenario_count