diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py
index b9d3350..3082dbe 100644
--- a/packages/sdk/server-ai/src/ldai/__init__.py
+++ b/packages/sdk/server-ai/src/ldai/__init__.py
@@ -32,7 +32,6 @@
     ProviderConfig,
 )
 from ldai.providers import (
-    AgentGraphResult,
     AgentGraphRunner,
     AgentGraphRunnerResult,
     GraphMetrics,
@@ -50,7 +49,6 @@
     'LDAIClient',
     'Evaluator',
     'AgentGraphRunner',
-    'AgentGraphResult',
     'AgentGraphRunnerResult',
     'GraphMetrics',
     'GraphMetricSummary',
diff --git a/packages/sdk/server-ai/src/ldai/managed_model.py b/packages/sdk/server-ai/src/ldai/managed_model.py
index 1c45603..87395b4 100644
--- a/packages/sdk/server-ai/src/ldai/managed_model.py
+++ b/packages/sdk/server-ai/src/ldai/managed_model.py
@@ -2,7 +2,7 @@
 from typing import List
 
 from ldai import log
-from ldai.models import AICompletionConfig, LDMessage
+from ldai.models import AICompletionConfig
 from ldai.providers.runner import Runner
 from ldai.providers.types import JudgeResult, ManagedResult, RunnerResult
 from ldai.tracker import LDAIConfigTracker
@@ -12,9 +12,10 @@ class ManagedModel:
     """
     LaunchDarkly managed wrapper for AI model invocations.
 
-    Holds a Runner. Handles conversation management, judge evaluation
-    dispatch, and tracking automatically via ``create_tracker()``.
-    Obtain an instance via ``LDAIClient.create_model()``.
+    Holds a Runner. Handles judge evaluation dispatch and tracking
+    automatically via ``create_tracker()``. Conversation history is
+    managed by the runner. Obtain an instance via
+    ``LDAIClient.create_model()``.
     """
 
     def __init__(
@@ -24,15 +25,13 @@
     ):
         self._ai_config = ai_config
         self._model_runner = model_runner
-        self._messages: List[LDMessage] = []
 
     async def run(self, prompt: str) -> ManagedResult:
         """
         Run the model with a prompt string.
 
-        Appends the prompt to the conversation history, prepends any
-        system messages from the config, delegates to the runner, and
-        appends the response to the history.
+        Delegates to the runner, then dispatches judge evaluations and
+        records tracking metrics.
 
         :param prompt: The user prompt to send to the model
         :return: ManagedResult containing the model's response, metric summary,
@@ -40,21 +39,12 @@
         """
         tracker = self._ai_config.create_tracker()
 
-        user_message = LDMessage(role='user', content=prompt)
-        self._messages.append(user_message)
-
         result: RunnerResult = await tracker.track_metrics_of_async(
             lambda r: r.metrics,
             lambda: self._model_runner.run(prompt),
         )
 
-        assistant_message = LDMessage(role='assistant', content=result.content)
-
-        input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else ''
-
-        evaluations_task = self._track_judge_results(tracker, input_text, result.content)
-
-        self._messages.append(assistant_message)
+        evaluations_task = self._track_judge_results(tracker, prompt, result.content)
 
         return ManagedResult(
             content=result.content,
@@ -88,25 +78,6 @@ async def _run_and_track(eval_task: asyncio.Task) -> List[JudgeResult]:
 
         return asyncio.create_task(_run_and_track(evaluator_task))
 
-    def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]:
-        """
-        Get all messages in the conversation history.
-
-        :param include_config_messages: When True, prepends config messages.
-        :return: List of conversation messages.
-        """
-        if include_config_messages:
-            return (self._ai_config.messages or []) + self._messages
-        return list(self._messages)
-
-    def append_messages(self, messages: List[LDMessage]) -> None:
-        """
-        Append messages to the conversation history without invoking the model.
-
-        :param messages: Messages to append.
-        """
-        self._messages.extend(messages)
-
     def get_model_runner(self) -> Runner:
         """
         Return the underlying runner for advanced use.
diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py
index f5beb3e..3012967 100644
--- a/packages/sdk/server-ai/src/ldai/providers/__init__.py
+++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py
@@ -3,7 +3,6 @@
 from ldai.providers.runner import Runner
 from ldai.providers.runner_factory import RunnerFactory
 from ldai.providers.types import (
-    AgentGraphResult,
     AgentGraphRunnerResult,
     GraphMetrics,
     GraphMetricSummary,
@@ -17,7 +16,6 @@
 
 __all__ = [
     'AIProvider',
-    'AgentGraphResult',
     'AgentGraphRunner',
     'AgentGraphRunnerResult',
     'GraphMetrics',
diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py
index 229e5f1..9ade163 100644
--- a/packages/sdk/server-ai/src/ldai/providers/types.py
+++ b/packages/sdk/server-ai/src/ldai/providers/types.py
@@ -204,20 +204,3 @@ def to_dict(self) -> Dict[str, Any]:
         if self.error_message is not None:
             result['errorMessage'] = self.error_message
         return result
-
-
-@dataclass
-class AgentGraphResult:
-    """Contains the result of an agent graph run."""
-
-    output: str
-    """The agent graph's final output content."""
-
-    raw: Any
-    """The provider-native response object from the graph run."""
-
-    metrics: LDAIMetrics
-    """Metrics recorded during the graph run."""
-
-    evaluations: Optional[List[JudgeResult]] = None
-    """Optional list of judge evaluation results produced for the graph run."""
diff --git a/packages/sdk/server-ai/tests/test_runner_abcs.py b/packages/sdk/server-ai/tests/test_runner_abcs.py
index d5ef80d..d9af2e3 100644
--- a/packages/sdk/server-ai/tests/test_runner_abcs.py
+++ b/packages/sdk/server-ai/tests/test_runner_abcs.py
@@ -1,7 +1,6 @@
 import pytest
 
 from ldai.providers import (
-    AgentGraphResult,
     AgentGraphRunner,
     AgentGraphRunnerResult,
     ToolRegistry,
@@ -78,6 +77,5 @@ def test_tool_registry_is_dict_of_callables():
 def test_top_level_exports():
     import ldai
     assert hasattr(ldai, 'AgentGraphRunner')
-    assert hasattr(ldai, 'AgentGraphResult')
     assert hasattr(ldai, 'RunnerResult')
     assert hasattr(ldai, 'ToolRegistry')