diff --git a/examples/bedrock/README.md b/examples/bedrock/README.md index af7a6a2..09da9dc 100644 --- a/examples/bedrock/README.md +++ b/examples/bedrock/README.md @@ -6,21 +6,25 @@ This example demonstrates how to use LaunchDarkly's AI Config with the AWS Bedro - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - AWS credentials configured for Bedrock access ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with a Bedrock model and a system message. Default key: `sample-completion-config`. + 1. Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. + > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-completion-config` if not set. -2. Ensure your AWS credentials can be [auto-detected by the `boto3` library](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). You can set them in your `.env` file: +1. Ensure your AWS credentials can be [auto-detected by the `boto3` library](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). You can set them in your `.env` file: ``` AWS_ACCESS_KEY_ID=your-access-key-id @@ -30,7 +34,7 @@ This example demonstrates how to use LaunchDarkly's AI Config with the AWS Bedro Other options include role providers or shared credential files. -3. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -39,5 +43,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with the AWS Bedro ## Run ```bash -poetry run bedrock-example +poetry run bedrock ``` diff --git a/examples/bedrock/bedrock_example.py b/examples/bedrock/bedrock_example.py index 74e482b..f10265b 100755 --- a/examples/bedrock/bedrock_example.py +++ b/examples/bedrock/bedrock_example.py @@ -1,20 +1,45 @@ import os +import logging from dotenv import load_dotenv import ldclient from ldclient import Context from ldclient.config import Config from ldai import LDAIClient +from ldai.tracker import TokenUsage +from ldai.providers import LDAIMetrics import boto3 load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + client = boto3.client("bedrock-runtime", region_name=os.getenv('AWS_DEFAULT_REGION', 'us-east-1')) + +def get_bedrock_metrics(response): + """Extract metrics from a Bedrock converse response.""" + status_code = response.get("ResponseMetadata", {}).get("HTTPStatusCode", 0) + success = status_code == 200 + + usage = None + if response.get("usage"): + u = response["usage"] + usage = TokenUsage( + total=u.get("totalTokens", 0), + input=u.get("inputTokens", 0), + output=u.get("outputTokens", 0), + ) + + duration_ms = response.get("metrics", {}).get("latencyMs") + + return LDAIMetrics(success=success, usage=usage, duration_ms=duration_ms) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. 
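+# The default key below matches the AI Config suggested in this example's README;
+# set LAUNCHDARKLY_AI_CONFIG_KEY in your .env to use a different one.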
-ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') def main(): if not sdk_key: @@ -69,25 +94,35 @@ def main(): chat_messages = [{'role': msg.role, 'content': [{'text': msg.content}]} for msg in config_value.messages if msg.role != 'system'] system_messages = [{'text': msg.content} for msg in config_value.messages if msg.role == 'system'] - # Add the user input to the conversation - USER_INPUT = "What can you help me with?" - print("User Input:\n", USER_INPUT) - chat_messages.append({'role': 'user', 'content': [{'text': USER_INPUT}]}) + SAMPLE_QUESTION = "What can you help me with?" + chat_messages.append({'role': 'user', 'content': [{'text': SAMPLE_QUESTION}]}) - converse = tracker.track_bedrock_converse_metrics( - client.converse( + print(f'\nSending sample question to {config_value.model.name}: "{SAMPLE_QUESTION}"') + print("Waiting for response...") + + converse = tracker.track_metrics_of( + get_bedrock_metrics, + lambda: client.converse( modelId=config_value.model.name, messages=chat_messages, system=system_messages, - ) + ), ) - # Append the AI response to the conversation history chat_messages.append(converse["output"]["message"]) - print("AI Response:\n", converse["output"]["message"]["content"][0]["text"]) - # Continue the conversation by adding user input to the messages list and invoking the LLM again. - print("Success.") + print(f"\nModel response:\n{converse['output']['message']['content'][0]['text']}") + + summary = tracker.get_summary() + print("\nDone! The AI config was evaluated and the following metrics were tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") # Flush pending events and close the client. ldclient.get().flush() diff --git a/examples/bedrock/pyproject.toml b/examples/bedrock/pyproject.toml index 622d5ae..ddf72f9 100644 --- a/examples/bedrock/pyproject.toml +++ b/examples/bedrock/pyproject.toml @@ -8,12 +8,12 @@ readme = "README.md" packages = [{include = "bedrock_example.py"}] [tool.poetry.scripts] -bedrock-example = "bedrock_example:main" +bedrock = "bedrock_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" +launchdarkly-server-sdk-ai = ">=0.19.0" boto3 = ">=0.2.0" [build-system] diff --git a/examples/chat_observability/README.md b/examples/chat_observability/README.md index 6af7b83..452d9d1 100644 --- a/examples/chat_observability/README.md +++ b/examples/chat_observability/README.md @@ -13,21 +13,23 @@ View your data in the LaunchDarkly dashboard under **Observability** tabs. - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - An API key for your AI provider (e.g., OpenAI) ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with a model and a system message. Default key: `sample-completion-config`. + 1. 
Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config OPENAI_API_KEY=your-openai-api-key ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. - Optionally, set service identification: ``` @@ -35,7 +37,7 @@ View your data in the LaunchDarkly dashboard under **Observability** tabs. SERVICE_VERSION=1.0.0 ``` -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -44,5 +46,5 @@ View your data in the LaunchDarkly dashboard under **Observability** tabs. ## Run ```bash -poetry run chat-observability-example +poetry run chat ``` diff --git a/examples/chat_observability/chat_observability_example.py b/examples/chat_observability/chat_observability_example.py index 6a5cbbf..66efe00 100644 --- a/examples/chat_observability/chat_observability_example.py +++ b/examples/chat_observability/chat_observability_example.py @@ -1,7 +1,7 @@ import os +import logging from dotenv import load_dotenv import asyncio -import logging import ldclient from ldclient import Context from ldclient.config import Config @@ -10,13 +10,15 @@ load_dotenv() +logging.basicConfig() logging.getLogger('ldclient').setLevel(logging.WARNING) +logging.getLogger('httpx').setLevel(logging.WARNING) # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. -ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') # Service configuration for observability service_name = os.getenv('SERVICE_NAME', 'hello-python-ai-observability') @@ -85,27 +87,29 @@ async def async_main(): print(f"*** Failed to create chat for key: {ai_config_key}") return - user_input_1 = "What is feature flagging in 2 sentences?" - print("User Input:", user_input_1) - - response_1 = await chat.invoke(user_input_1) - print("Chat Response:", response_1.message.content) + sample_question_1 = "What is feature flagging in 2 sentences?" + print(f'\nSending sample question: "{sample_question_1}"') + print("Waiting for response...") + + response_1 = await chat.run(sample_question_1) + print(f"\nModel response:\n{response_1.content}") - user_input_2 = "Give me a specific use case example." - print("\nUser Input:", user_input_2) + sample_question_2 = "Give me a specific use case example." + print(f'\nSending follow-up question: "{sample_question_2}"') + print("Waiting for response...") - response_2 = await chat.invoke(user_input_2) - print("Chat Response:", response_2.message.content) + response_2 = await chat.run(sample_question_2) + print(f"\nModel response:\n{response_2.content}") - # Judge evaluations run asynchronously. Await them (e.g. with asyncio.gather) so they + # Judge evaluations run asynchronously. Await them so they # complete before the process or request ends—even if you don't need to log or use # the results. - if response_1.evaluations: - await asyncio.gather(*response_1.evaluations) - if response_2.evaluations: - await asyncio.gather(*response_2.evaluations) + if response_1.evaluations is not None: + await response_1.evaluations + if response_2.evaluations is not None: + await response_2.evaluations - print("\nSuccess.") + print("\nDone! 
The AI config was evaluated with observability enabled.") except Exception as err: print("Error:", err) diff --git a/examples/chat_observability/pyproject.toml b/examples/chat_observability/pyproject.toml index d098273..4dd142c 100644 --- a/examples/chat_observability/pyproject.toml +++ b/examples/chat_observability/pyproject.toml @@ -8,15 +8,15 @@ readme = "README.md" packages = [{include = "chat_observability_example.py"}] [tool.poetry.scripts] -chat-observability-example = "chat_observability_example:main" +chat = "chat_observability_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" +launchdarkly-server-sdk-ai = ">=0.19.0" launchdarkly-observability = ">=0.1.0" -launchdarkly-server-sdk-ai-openai = ">=0.4.0" -launchdarkly-server-sdk-ai-langchain = ">=0.5.0" +launchdarkly-server-sdk-ai-openai = ">=0.5.0" +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" openai = ">=0.2.0" [build-system] diff --git a/examples/gemini/README.md b/examples/gemini/README.md index 5b7422d..9abe617 100644 --- a/examples/gemini/README.md +++ b/examples/gemini/README.md @@ -6,22 +6,24 @@ This example demonstrates how to use LaunchDarkly's AI Config with the Google Ge - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - A [Google API key](https://aistudio.google.com/apikey) ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with a Gemini model (e.g. `gemini-2.0-flash`) and a system message. Default key: `sample-completion-config`. + 1. Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config GOOGLE_API_KEY=your-google-api-key ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. - -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -30,5 +32,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with the Google Ge ## Run ```bash -poetry run gemini-example +poetry run gemini ``` diff --git a/examples/gemini/gemini_example.py b/examples/gemini/gemini_example.py index 7732314..3f2b2f8 100644 --- a/examples/gemini/gemini_example.py +++ b/examples/gemini/gemini_example.py @@ -1,4 +1,5 @@ import os +import logging from dotenv import load_dotenv import ldclient from ldclient import Context @@ -11,11 +12,14 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. 
-ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') # Set Google API key google_api_key = os.getenv('GOOGLE_API_KEY') @@ -143,12 +147,13 @@ def main(): # Convert LaunchDarkly messages to Google AI format using the helper function system_instruction, messages = map_to_google_ai_messages(config_value.messages or []) - # Add the user input to the conversation - USER_INPUT = "What can you help me with?" - print("User Input:\n", USER_INPUT) - user_message = types.Content(role="user", parts=[types.Part(text=USER_INPUT)]) + SAMPLE_QUESTION = "What can you help me with?" + user_message = types.Content(role="user", parts=[types.Part(text=SAMPLE_QUESTION)]) messages.append(user_message) + print(f'\nSending sample question to {config_value.model.name}: "{SAMPLE_QUESTION}"') + print("Waiting for response...") + completion = track_genai_metrics(tracker, lambda: client.models.generate_content( model=config_value.model.name, contents=messages, @@ -158,13 +163,21 @@ def main(): )) ai_response = completion.text - # Add the AI response to the conversation history ai_message = types.Content(role="model", parts=[types.Part(text=ai_response)]) messages.append(ai_message) - print("AI Response:\n", ai_response) - # Continue the conversation by adding user input to the messages list and invoking the LLM again. - print("Success.") + print(f"\nModel response:\n{ai_response}") + + summary = tracker.get_summary() + print("\nDone! The AI config was evaluated and the following metrics were tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") # Flush pending events and close the client. ldclient.get().flush() diff --git a/examples/gemini/pyproject.toml b/examples/gemini/pyproject.toml index aac3ae2..ebd1bcd 100644 --- a/examples/gemini/pyproject.toml +++ b/examples/gemini/pyproject.toml @@ -8,12 +8,12 @@ readme = "README.md" packages = [{include = "gemini_example.py"}] [tool.poetry.scripts] -gemini-example = "gemini_example:main" +gemini = "gemini_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" +launchdarkly-server-sdk-ai = ">=0.19.0" google-genai = "^1.30.0" [build-system] diff --git a/examples/judge/README.md b/examples/judge/README.md index 1f0ccf6..7041c9d 100644 --- a/examples/judge/README.md +++ b/examples/judge/README.md @@ -6,30 +6,31 @@ These examples demonstrate how to use LaunchDarkly's judge functionality to eval - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created for chat functionality -- A [Judge Config](https://launchdarkly.com/docs/home/ai-configs/judges) created for evaluation +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - API keys for the provider you want to use (OpenAI, Bedrock, or Gemini) ## Setup +1. Create the following configs in your LaunchDarkly project. You can use different keys by setting the environment variables in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with a model and system message. 
Default key: `sample-completion-config`. + - [Create a Judge Config](https://launchdarkly.com/docs/home/ai-configs/judges) for evaluation. Default key: `sample-ai-judge`. + 1. Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config - LAUNCHDARKLY_AI_JUDGE_KEY=sample-ai-judge-accuracy + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config + LAUNCHDARKLY_AI_JUDGE_KEY=sample-ai-judge ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. - > `LAUNCHDARKLY_AI_JUDGE_KEY` defaults to `sample-ai-judge-accuracy` if not set. - Add the API key for your chosen provider: ``` OPENAI_API_KEY=your-openai-api-key ``` -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -42,7 +43,7 @@ These examples demonstrate how to use LaunchDarkly's judge functionality to eval Uses the chat functionality which automatically evaluates responses with any judges defined in the AI config. ```bash -poetry run chat-judge-example +poetry run chat-judge ``` ### Direct judge evaluation @@ -50,5 +51,5 @@ poetry run chat-judge-example Evaluates specific input/output pairs using a judge configuration directly. ```bash -poetry run direct-judge-example +poetry run direct-judge ``` diff --git a/examples/judge/chat_judge_example.py b/examples/judge/chat_judge_example.py index e58dabb..0e4a26c 100644 --- a/examples/judge/chat_judge_example.py +++ b/examples/judge/chat_judge_example.py @@ -1,6 +1,6 @@ import os +import logging from dotenv import load_dotenv -import json import asyncio import ldclient from ldclient import Context @@ -9,11 +9,14 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. -ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') async def async_main(): @@ -59,45 +62,34 @@ async def async_main(): print(f"*** Failed to create chat for key: {ai_config_key}") return - print("\n*** Starting chat with automatic judge evaluation:") - user_input = 'How can LaunchDarkly help me?' - print("User Input:", user_input) - - # The invoke method will automatically evaluate the chat response with any judges defined in the AI config - chat_response = await chat.invoke(user_input) - print("Chat Response:", chat_response.message.content) - - # Judge evaluations run asynchronously. Await them (e.g. with asyncio.gather) so they - # complete before the process or request ends—even if you don't need to log or use - # the results. Below we await and then log the results for demonstration. - - # Log judge evaluation results with full detail - if chat_response.evaluations is not None and len(chat_response.evaluations) > 0: - # Note: Judge evaluations run asynchronously and do not block your application. - # Results are automatically sent to LaunchDarkly for AI config metrics. - # You only need to await if you want to access the evaluation results in your code. 
- print("\nNote: Awaiting judge results (optional - done here for demonstration only).") - eval_results = await asyncio.gather(*chat_response.evaluations) - - # Convert results, replacing None with a message - results_to_display = [ - result.to_dict() if result is not None else "not evaluated" - for result in eval_results - ] - + sample_question = 'How can LaunchDarkly help me?' + print(f'\nSending sample question: "{sample_question}"') + print("Waiting for response...") + + chat_response = await chat.run(sample_question) + print(f"\nModel response:\n{chat_response.content}") + + # Judge evaluations run asynchronously. Await them so they complete before the + # process or request ends—even if you don't need to log or use the results. + + if chat_response.evaluations is not None: + eval_results = await chat_response.evaluations + print("Judge results:") - print(json.dumps(results_to_display, indent=2, default=str)) - - if None in eval_results: - print("\nNote: Some judge evaluations were skipped.") - print("This typically happens when the sample rate doesn't require this evaluation, or due to a configuration issue.") - print("Check application logs for more details.") + for result in eval_results: + print(f"- judge_config_key: {result.judge_config_key}") + print(f" sampled: {result.sampled}") + if not result.sampled: + continue + print(f" success: {result.success}") + print(f" error_message: {result.error_message}") + print(f" metric_key: {result.metric_key}") + print(f" score: {result.score}") + print(f" reasoning: {result.reasoning}") + else: - print("\nNo judge evaluations were performed.") - print("This typically happens when the sample rate doesn't require this evaluation, or due to a configuration issue.") - print("Check application logs for more details.") + print("\nNo judge evaluations were performed. Try adding a judge to the AI config to see results.") - print("Success.") except Exception as err: print("Error:", err) finally: diff --git a/examples/judge/direct_judge_example.py b/examples/judge/direct_judge_example.py index 85369ac..6650d32 100644 --- a/examples/judge/direct_judge_example.py +++ b/examples/judge/direct_judge_example.py @@ -1,6 +1,6 @@ import os +import logging from dotenv import load_dotenv -import json import asyncio import ldclient from ldclient import Context @@ -9,11 +9,14 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set judge_key to the Judge key you want to use. -judge_key = os.getenv('LAUNCHDARKLY_AI_JUDGE_KEY', 'sample-ai-judge-accuracy') +judge_key = os.getenv('LAUNCHDARKLY_AI_JUDGE_KEY', 'sample-ai-judge') async def async_main(): @@ -54,38 +57,38 @@ async def async_main(): # {'role': 'user', 'content': 'RESPONSE TO EVALUATE: {{response_to_evaluate}}'}, # ], # ) - # judge = await aiclient.create_judge(judge_key, context, default) - judge = await aiclient.create_judge(judge_key, context) + # judge = aiclient.create_judge(judge_key, context, default) + judge = aiclient.create_judge(judge_key, context) if not judge: print(f"*** Failed to create judge for key: {judge_key}") return - print("\n*** Starting direct judge evaluation of input and output:") input_text = 'You are a helpful assistant for the company LaunchDarkly. How can you help me?' output_text = 'I can answer any question you have except for questions about the company LaunchDarkly.' 
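+        # This sample pair is deliberately contradictory: the output refuses to
+        # answer questions about LaunchDarkly, so a reasonably configured judge
+        # should score it low.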
- print("Input:", input_text) - print("Output:", output_text) - - judge_response = await judge.evaluate(input_text, output_text) + print(f'\nEvaluating a sample input/output pair with the judge:') + print(f' Sample input: "{input_text}"') + print(f' Sample output: "{output_text}"') + print("Waiting for judge evaluation...") - if judge_response is None: - print("\nJudge evaluation was skipped.") - print("This typically happens when the sample rate doesn't require this evaluation, or due to a configuration issue.") - print("Check application logs for more details.") - return + judge_result = await judge.evaluate(input_text, output_text) # Track the judge evaluation scores on the tracker for the aiConfig you are evaluating # Example: - # aiConfig.tracker.track_eval_scores(judge_response.evals) - - # Convert JudgeResponse to dict for display using to_dict() - judge_response_dict = judge_response.to_dict() - print("Judge Response:") - print(json.dumps(judge_response_dict, indent=2, default=str)) - - print("Success.") + # aiConfig.create_tracker().track_judge_result(judge_result) + + print("\nJudge result:") + print(f"- judge_config_key: {judge_key}") + print(f" sampled: {judge_result.sampled}") + if judge_result.sampled: + print(f" success: {judge_result.success}") + print(f" error_message: {judge_result.error_message}") + print(f" metric_key: {judge_result.metric_key}") + print(f" score: {judge_result.score}") + print(f" reasoning: {judge_result.reasoning}") + + print("\nDone!") except Exception as err: print("Error:", err) finally: diff --git a/examples/judge/pyproject.toml b/examples/judge/pyproject.toml index e45fdf4..c5b1818 100644 --- a/examples/judge/pyproject.toml +++ b/examples/judge/pyproject.toml @@ -11,16 +11,17 @@ packages = [ ] [tool.poetry.scripts] -chat-judge-example = "chat_judge_example:main" -direct-judge-example = "direct_judge_example:main" +chat-judge = "chat_judge_example:main" +direct-judge = "direct_judge_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" -launchdarkly-server-sdk-ai-openai = "^0.4.0" -launchdarkly-server-sdk-ai-langchain = "^0.5.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-openai = ">=0.5.0" +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" openai = ">=1.0.0" +langchain-openai = "^1.0.0" [build-system] requires = ["poetry-core"] diff --git a/examples/langchain/README.md b/examples/langchain/README.md index a4827df..5f42e0e 100644 --- a/examples/langchain/README.md +++ b/examples/langchain/README.md @@ -6,20 +6,22 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangChain, su - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - API keys for the providers you want to use ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with a model and a system message. Default key: `sample-completion-config`. + 1. 
Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. - Add the API keys for the providers you want to use: ``` @@ -29,7 +31,7 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangChain, su AWS_SECRET_ACCESS_KEY=your-secret-access-key ``` -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -38,5 +40,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangChain, su ## Run ```bash -poetry run langchain-example +poetry run langchain ``` diff --git a/examples/langchain/langchain_example.py b/examples/langchain/langchain_example.py index 232736c..8f09121 100644 --- a/examples/langchain/langchain_example.py +++ b/examples/langchain/langchain_example.py @@ -1,4 +1,5 @@ import os +import logging from dotenv import load_dotenv import asyncio import ldclient @@ -10,11 +11,14 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. -ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') def map_provider_to_langchain(provider_name): """Map LaunchDarkly provider names to LangChain provider names.""" @@ -74,35 +78,40 @@ async def async_main(): tracker = config_value.create_tracker() try: - # Create LangChain model instance using init_chat_model - # Map the provider from config_value to LangChain format - print("Model:", config_value.model.name, "Provider:", config_value.provider.name) langchain_provider = map_provider_to_langchain(config_value.provider.name) llm = init_chat_model( model=config_value.model.name, model_provider=langchain_provider, ) - + messages = [message.to_dict() for message in (config_value.messages or [])] - # Add the user input to the conversation - USER_INPUT = "What can you help me with?" - print("User Input:\n", USER_INPUT) - messages.append({'role': 'user', 'content': USER_INPUT}) + SAMPLE_QUESTION = "What can you help me with?" + messages.append({'role': 'user', 'content': SAMPLE_QUESTION}) - # Track the LangChain completion with LaunchDarkly metrics using the LD LangChain provider's extractor - completion = await tracker.track_metrics_of( - lambda: llm.ainvoke(messages), + print(f'\nSending sample question to {config_value.model.name} via LangChain ({langchain_provider}): "{SAMPLE_QUESTION}"') + print("Waiting for response...") + + completion = await tracker.track_metrics_of_async( get_ai_metrics_from_response, + lambda: llm.ainvoke(messages), ) ai_response = completion.content - # Add the AI response to the conversation history. messages.append({'role': 'assistant', 'content': ai_response}) - print("AI Response:\n", ai_response) - # Continue the conversation by adding user input to the messages list and invoking the LLM again. - print("Success.") + print(f"\nModel response:\n{ai_response}") + + summary = tracker.get_summary() + print("\nDone! 
The AI config was evaluated and the following metrics were tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") except Exception as e: print(f"Error during completion: {e}") diff --git a/examples/langchain/pyproject.toml b/examples/langchain/pyproject.toml index 6da9bc0..e416120 100644 --- a/examples/langchain/pyproject.toml +++ b/examples/langchain/pyproject.toml @@ -8,13 +8,13 @@ readme = "README.md" packages = [{include = "langchain_example.py"}] [tool.poetry.scripts] -langchain-example = "langchain_example:main" +langchain = "langchain_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" -launchdarkly-server-sdk-ai-langchain = "^0.5.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" langchain = "^1.0.0" langchain-core = "^1.0.0" langchain-openai = "^1.0.0" diff --git a/examples/langgraph_agent/README.md b/examples/langgraph_agent/README.md index a5be720..d94d8c7 100644 --- a/examples/langgraph_agent/README.md +++ b/examples/langgraph_agent/README.md @@ -6,20 +6,22 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config (Agent-based)](https://launchdarkly.com/docs/home/ai-configs/agents) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - API keys for the providers you want to use ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Agent Config](https://launchdarkly.com/docs/home/ai-configs/agents) with a model and agent instructions. Default key: `sample-agent-config`. + 1. Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AGENT_CONFIG_KEY=sample-ai-agent-config + LAUNCHDARKLY_AGENT_CONFIG_KEY=sample-agent-config ``` - > `LAUNCHDARKLY_AGENT_CONFIG_KEY` defaults to `sample-ai-agent-config` if not set. - Add the API keys for the providers you want to use: ``` @@ -29,7 +31,7 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to AWS_SECRET_ACCESS_KEY=your-secret-access-key ``` -2. Install the required dependencies: +1. 
Install the required dependencies: ```bash poetry install @@ -38,5 +40,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to ## Run ```bash -poetry run langgraph-agent-example +poetry run agent ``` diff --git a/examples/langgraph_agent/langgraph_agent_example.py b/examples/langgraph_agent/langgraph_agent_example.py index 2778bd6..e91e85a 100644 --- a/examples/langgraph_agent/langgraph_agent_example.py +++ b/examples/langgraph_agent/langgraph_agent_example.py @@ -1,22 +1,25 @@ import os +import logging from dotenv import load_dotenv import ldclient -from pprint import pprint from ldclient import Context from ldclient.config import Config from ldai import LDAIClient -from ldai.tracker import TokenUsage -from ldai_langchain import get_ai_metrics_from_response +from ldai.providers import LDAIMetrics +from ldai_langchain import sum_token_usage_from_messages from langchain.chat_models import init_chat_model from langgraph.prebuilt import create_react_agent load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config key for the agent -agent_config_key = os.getenv('LAUNCHDARKLY_AGENT_CONFIG_KEY', 'sample-ai-agent-config') +agent_config_key = os.getenv('LAUNCHDARKLY_AGENT_CONFIG_KEY', 'sample-agent-config') def map_provider_to_langchain(provider_name): """Map LaunchDarkly provider names to LangChain provider names.""" @@ -26,36 +29,10 @@ def map_provider_to_langchain(provider_name): lower_provider = provider_name.lower() return provider_mapping.get(lower_provider, lower_provider) -def track_langgraph_metrics(tracker, func): - """ - Track LangGraph agent operations with LaunchDarkly metrics. - """ - try: - result = tracker.track_duration_of(func) - tracker.track_success() - - total_input_tokens = 0 - total_output_tokens = 0 - total_tokens = 0 - if "messages" in result: - for message in result["messages"]: - metrics = get_ai_metrics_from_response(message) - if metrics.usage: - total_input_tokens += metrics.usage.input - total_output_tokens += metrics.usage.output - total_tokens += metrics.usage.total - if total_tokens > 0: - tracker.track_tokens( - TokenUsage( - input=total_input_tokens, - output=total_output_tokens, - total=total_tokens, - ) - ) - except Exception: - tracker.track_error() - raise - return result +def get_langgraph_metrics(response): + """Extract aggregated metrics from a LangGraph agent response.""" + messages = response.get("messages", []) + return LDAIMetrics(success=True, usage=sum_token_usage_from_messages(messages)) def get_weather(city: str) -> str: """Get the weather for a given city.""" @@ -83,8 +60,7 @@ def main(): .build() ) - print(f"🔍 Using agent config: {agent_config_key}") - print() + print(f"\nUsing agent config: {agent_config_key}") # Pass a default for improved resiliency when the agent config is unavailable # or LaunchDarkly is unreachable; omit for a disabled default. @@ -113,17 +89,35 @@ def main(): prompt=agent_config.instructions ) + SAMPLE_QUESTION = "What is the weather in Tokyo?" 
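+    # get_weather (defined above) returns a canned "sunny" answer, so this question
+    # exercises the agent's tool-calling path without hitting a real weather API.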
+ + print(f'\nSending sample question to {agent_config.model.name} agent: "{SAMPLE_QUESTION}"') + print("Waiting for response...") + try: - # Track and execute the agent - response = track_langgraph_metrics(agent_config.create_tracker(), lambda: agent.invoke({ - "messages": [{"role": "user", "content": "What is the weather in Tokyo?"}] - })) - - print("Agent response:") - print(response["messages"][-1].content) - + tracker = agent_config.create_tracker() + response = tracker.track_metrics_of( + get_langgraph_metrics, + lambda: agent.invoke({ + "messages": [{"role": "user", "content": SAMPLE_QUESTION}] + }), + ) + + print(f"\nAgent response:\n{response['messages'][-1].content}") + + summary = tracker.get_summary() + print("\nDone! The agent config was evaluated and the following metrics were tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") + except Exception as e: - print(f"Error: {e}") + print(f"\nError: {e}") print("Please ensure you have the correct API keys and credentials set up for the detected providers.") # Flush pending events and close the client. diff --git a/examples/langgraph_agent/pyproject.toml b/examples/langgraph_agent/pyproject.toml index a36b704..0e3e870 100644 --- a/examples/langgraph_agent/pyproject.toml +++ b/examples/langgraph_agent/pyproject.toml @@ -8,13 +8,13 @@ readme = "README.md" packages = [{include = "langgraph_agent_example.py"}] [tool.poetry.scripts] -langgraph-agent-example = "langgraph_agent_example:main" +agent = "langgraph_agent_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" -launchdarkly-server-sdk-ai-langchain = "^0.5.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" langchain = "^1.0.0" langchain-core = "^1.0.0" langchain-openai = "^1.0.0" diff --git a/examples/langgraph_multi_agent/README.md b/examples/langgraph_multi_agent/README.md index 00e3566..1388a02 100644 --- a/examples/langgraph_multi_agent/README.md +++ b/examples/langgraph_multi_agent/README.md @@ -6,11 +6,16 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with [AI Configs (Agent-based)](https://launchdarkly.com/docs/home/ai-configs/agents) created using the keys below. Write a goal for each config and enable it with targeting rules. +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - API keys for the providers you want to use ## Setup +1. Create the following configs in your LaunchDarkly project. You can use different keys by setting the environment variables in your `.env`. + + - [Create an AI Agent Config](https://launchdarkly.com/docs/home/ai-configs/agents) for code analysis. Default key: `code-review-analyzer`. + - [Create an AI Agent Config](https://launchdarkly.com/docs/home/ai-configs/agents) for documentation generation. Default key: `code-review-documentation`. + 1. 
Create a `.env` file in this directory with the following variables: ``` @@ -19,9 +24,6 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to LAUNCHDARKLY_DOCUMENTATION_CONFIG_KEY=code-review-documentation ``` - > `LAUNCHDARKLY_ANALYZER_CONFIG_KEY` defaults to `code-review-analyzer` if not set. - > `LAUNCHDARKLY_DOCUMENTATION_CONFIG_KEY` defaults to `code-review-documentation` if not set. - Add the API keys for the providers you want to use: ``` @@ -31,7 +33,7 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to AWS_SECRET_ACCESS_KEY=your-secret-access-key ``` -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -40,5 +42,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with LangGraph to ## Run ```bash -poetry run langgraph-multi-agent-example +poetry run agent-graph ``` diff --git a/examples/langgraph_multi_agent/langgraph_multi_agent_example.py b/examples/langgraph_multi_agent/langgraph_multi_agent_example.py index 2ce8c8c..58246cd 100644 --- a/examples/langgraph_multi_agent/langgraph_multi_agent_example.py +++ b/examples/langgraph_multi_agent/langgraph_multi_agent_example.py @@ -1,4 +1,5 @@ import os +import logging from dotenv import load_dotenv import ldclient from ldclient import Context @@ -14,6 +15,9 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') @@ -106,7 +110,7 @@ def ai_node( next_step: str ) -> Command: """Unified function to process code with AI agents (analysis or documentation).""" - print(f"Starting node for {config_key}") + print(f"\nStarting node for {config_key}...") try: agent, tracker, disabled = create_agent_with_config( @@ -143,7 +147,7 @@ def ai_node( ) except Exception as e: - print(f"❌ Error in node for {config_key}: {e}") + print(f"Error in node for {config_key}: {e}") return Command( goto=END, update={ @@ -154,7 +158,7 @@ def ai_node( def create_final_report(state: CodeReviewState) -> Command: """Combine analysis and documentation into a final report.""" - print("Creating final report") + print("\nCreating final report...") # Use the stored analysis and documentation from state analysis = state.get("analysis", "No analysis available") @@ -171,7 +175,7 @@ def create_final_report(state: CodeReviewState) -> Command: --- *This report was generated by the LaunchDarkly Code Review Duo using LangGraph*""" - print("✅ Final report created") + print("Final report created.") return Command( goto=END, @@ -225,10 +229,9 @@ def calculate_average(numbers): return total / count ''' - print("🔍 Starting Code Review Duo with LangGraph...") - print(f"📋 Using analyzer config: {analyzer_config_key}") - print(f"📝 Using documentation config: {documentation_config_key}") - print() + print("\nStarting Code Review Duo with LangGraph...") + print(f"Using analyzer config: {analyzer_config_key}") + print(f"Using documentation config: {documentation_config_key}") # Create the workflow graph with custom state workflow = StateGraph(CodeReviewState) @@ -262,7 +265,7 @@ def calculate_average(numbers): result = app.invoke(initial_state) print("\n" + "="*80) - print("📊 FINAL CODE REVIEW REPORT") + print("FINAL CODE REVIEW REPORT") print("="*80) # Use the final report from state @@ -271,7 +274,7 @@ def calculate_average(numbers): print("="*80) except Exception as e: - print(f"❌ Error during workflow execution: {e}") + print(f"Error 
during workflow execution: {e}") print("Please ensure you have the correct API keys and credentials set up for the detected providers.") # Flush pending events and close the client. diff --git a/examples/langgraph_multi_agent/pyproject.toml b/examples/langgraph_multi_agent/pyproject.toml index 723be83..1617b7e 100644 --- a/examples/langgraph_multi_agent/pyproject.toml +++ b/examples/langgraph_multi_agent/pyproject.toml @@ -8,13 +8,13 @@ readme = "README.md" packages = [{include = "langgraph_multi_agent_example.py"}] [tool.poetry.scripts] -langgraph-multi-agent-example = "langgraph_multi_agent_example:main" +agent-graph = "langgraph_multi_agent_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" -launchdarkly-server-sdk-ai-langchain = "^0.5.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" langchain = "^1.0.0" langchain-core = "^1.0.0" langchain-openai = "^1.0.0" diff --git a/examples/managed_agent/README.md b/examples/managed_agent/README.md new file mode 100644 index 0000000..520afc0 --- /dev/null +++ b/examples/managed_agent/README.md @@ -0,0 +1,36 @@ +# Managed Agent Example + +This example demonstrates how to use LaunchDarkly's managed agent functionality, which handles model creation, metric tracking, and judge evaluation dispatch automatically. + +## Prerequisites + +- Python 3.10 or higher +- [Poetry](https://python-poetry.org/) installed +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key +- API keys for the provider you want to use (OpenAI, Bedrock, or Gemini) + +## Setup + +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Agent Config](https://launchdarkly.com/docs/home/ai-configs/agents) with a model and agent instructions. Default key: `sample-agent-config`. + +1. Create a `.env` file in this directory with the following variables: + + ``` + LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key + LAUNCHDARKLY_AGENT_CONFIG_KEY=sample-agent-config + OPENAI_API_KEY=your-openai-api-key + ``` + +1. Install the required dependencies: + + ```bash + poetry install + ``` + +## Run + +```bash +poetry run agent +``` diff --git a/examples/managed_agent/managed_agent_example.py b/examples/managed_agent/managed_agent_example.py new file mode 100644 index 0000000..5ac5b48 --- /dev/null +++ b/examples/managed_agent/managed_agent_example.py @@ -0,0 +1,121 @@ +import os +import logging +from dotenv import load_dotenv +import asyncio +import ldclient +from ldclient import Context +from ldclient.config import Config +from ldai import LDAIClient, AIAgentConfigDefault + +load_dotenv() + +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + +# Set sdk_key to your LaunchDarkly SDK key. +sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') + +# Set agent_config_key to the AI Agent Config key you want to evaluate. +agent_config_key = os.getenv('LAUNCHDARKLY_AGENT_CONFIG_KEY', 'sample-agent-config') + + +def get_weather(city: str) -> str: + """Get the weather for a given city.""" + return f"The weather in {city} is sunny." + + +async def async_main(): + if not sdk_key: + print("*** Please set the LAUNCHDARKLY_SDK_KEY env first") + exit() + + ldclient.set_config(Config(sdk_key)) + + if not ldclient.get().is_initialized(): + print("*** SDK failed to initialize. 
Please check your internet connection and SDK credential for any typo.") + exit() + + aiclient = LDAIClient(ldclient.get()) + print("*** SDK successfully initialized") + + # Set up the evaluation context. This context should appear on your + # LaunchDarkly contexts dashboard soon after you run the demo. + context = ( + Context + .builder('example-user-key') + .kind('user') + .name('Sandy') + .build() + ) + + try: + # Pass a default for improved resiliency when the agent config is unavailable + # or LaunchDarkly is unreachable; omit for a disabled default. + # Example: + # default = AIAgentConfigDefault( + # enabled=True, + # model={'name': 'gpt-4'}, + # provider={'name': 'openai'}, + # instructions='You are a helpful weather assistant.', + # ) + # agent = await aiclient.create_agent(agent_config_key, context, tools={'get_weather': get_weather}, default=default) + agent = await aiclient.create_agent( + agent_config_key, + context, + tools={'get_weather': get_weather}, + ) + + if not agent: + print(f"*** Failed to create agent for key: {agent_config_key}") + return + + sample_question = 'What is the weather in Tokyo?' + print(f'\nSending sample question: "{sample_question}"') + print("Waiting for response...") + + agent_response = await agent.run(sample_question) + print(f"\nAgent response:\n{agent_response.content}") + + summary = agent_response.metrics + print("\nMetrics tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") + + if agent_response.evaluations is not None: + eval_results = await agent_response.evaluations + + print("\nJudge results:") + for result in eval_results: + print(f"- judge_config_key: {result.judge_config_key}") + print(f" sampled: {result.sampled}") + if not result.sampled: + continue + print(f" success: {result.success}") + print(f" error_message: {result.error_message}") + print(f" metric_key: {result.metric_key}") + print(f" score: {result.score}") + print(f" reasoning: {result.reasoning}") + else: + print("\nNo judge evaluations were performed.") + + except Exception as err: + print("Error:", err) + finally: + # Flush pending events and close the client. 
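+        # flush() delivers any queued analytics events (config evaluations, metrics,
+        # and judge results) to LaunchDarkly; close() then releases SDK resources.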
+ ldclient.get().flush() + ldclient.get().close() + + +def main(): + """Synchronous entry point for Poetry script.""" + asyncio.run(async_main()) + + +if __name__ == "__main__": + main() diff --git a/examples/managed_agent/pyproject.toml b/examples/managed_agent/pyproject.toml new file mode 100644 index 0000000..ddf727c --- /dev/null +++ b/examples/managed_agent/pyproject.toml @@ -0,0 +1,24 @@ +[tool.poetry] +name = "hello-python-ai-managed-agent" +version = "0.1.0" +description = "Hello LaunchDarkly for Python AI - Managed Agent" +authors = ["LaunchDarkly "] +license = "Apache-2.0" +readme = "README.md" +packages = [{include = "managed_agent_example.py"}] + +[tool.poetry.scripts] +agent = "managed_agent_example:main" + +[tool.poetry.dependencies] +python = "^3.10" +python-dotenv = ">=1.0.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-openai = {version = ">=0.5.0", extras = ["agents"]} +launchdarkly-server-sdk-ai-langchain = ">=0.6.0" +openai = ">=1.0.0" +langchain-openai = "^1.0.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/examples/managed_agent_graph/README.md b/examples/managed_agent_graph/README.md new file mode 100644 index 0000000..a75cebd --- /dev/null +++ b/examples/managed_agent_graph/README.md @@ -0,0 +1,37 @@ +# Managed Agent Graph Example + +This example demonstrates how to use LaunchDarkly's managed agent graph functionality, which orchestrates multi-node agent workflows with automatic metric tracking at both the graph and per-node level. + +## Prerequisites + +- Python 3.10 or higher +- [Poetry](https://python-poetry.org/) installed +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key +- API keys for the provider you want to use (OpenAI, Bedrock, or Gemini) + +## Setup + +1. Create the following configs in your LaunchDarkly project. You can use different keys by setting the environment variables in your `.env`. + + - [Create AI Agent Configs](https://launchdarkly.com/docs/home/ai-configs/agents) for each node in your graph. Configure each with a model and agent instructions. Add tools (e.g. `search_flights`, `search_hotels`, `get_weather`) to the agents that need them. + - [Create an Agent Graph](https://launchdarkly.com/docs/home/ai-configs/create) that connects your agent configs as nodes with edges defining the workflow. Default key: `sample-agent-graph`. + +1. Create a `.env` file in this directory with the following variables: + + ``` + LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key + LAUNCHDARKLY_AGENT_GRAPH_KEY=sample-agent-graph + OPENAI_API_KEY=your-openai-api-key + ``` + +1. Install the required dependencies: + + ```bash + poetry install + ``` + +## Run + +```bash +poetry run agent-graph +``` diff --git a/examples/managed_agent_graph/managed_agent_graph_example.py b/examples/managed_agent_graph/managed_agent_graph_example.py new file mode 100644 index 0000000..8d09452 --- /dev/null +++ b/examples/managed_agent_graph/managed_agent_graph_example.py @@ -0,0 +1,139 @@ +import os +import logging +from dotenv import load_dotenv +import asyncio +import ldclient +from ldclient import Context +from ldclient.config import Config +from ldai import LDAIClient + +load_dotenv() + +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + +# Set sdk_key to your LaunchDarkly SDK key. +sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') + +# Set graph_key to the Agent Graph key you want to evaluate. 
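+# Defaults to the graph key suggested in this example's README; set
+# LAUNCHDARKLY_AGENT_GRAPH_KEY in your .env to point at a different Agent Graph.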
+graph_key = os.getenv('LAUNCHDARKLY_AGENT_GRAPH_KEY', 'sample-agent-graph') + + +def search_flights(destination: str, date: str) -> str: + """Search for available flights to a destination on a given date.""" + return f"Found 3 flights to {destination} on {date}: Flight A ($400), Flight B ($550), Flight C ($320)." + + +def search_hotels(destination: str, check_in: str, check_out: str) -> str: + """Search for available hotels at a destination.""" + return f"Found 2 hotels in {destination}: Hotel Sunrise ($150/night), Hotel Seaside ($220/night)." + + +def get_weather(city: str) -> str: + """Get the weather forecast for a given city.""" + return f"The weather in {city} is expected to be sunny with highs around 75°F." + + +async def async_main(): + if not sdk_key: + print("*** Please set the LAUNCHDARKLY_SDK_KEY env first") + exit() + + ldclient.set_config(Config(sdk_key)) + + if not ldclient.get().is_initialized(): + print("*** SDK failed to initialize. Please check your internet connection and SDK credential for any typo.") + exit() + + aiclient = LDAIClient(ldclient.get()) + print("*** SDK successfully initialized") + + # Set up the evaluation context. + context = ( + Context + .builder('example-user-key') + .kind('user') + .name('Sandy') + .build() + ) + + try: + graph = await aiclient.create_agent_graph( + graph_key, + context, + tools={ + 'search_flights': search_flights, + 'search_hotels': search_hotels, + 'get_weather': get_weather, + }, + ) + + if not graph: + print(f"*** Failed to create agent graph for key: {graph_key}") + return + + sample_question = 'Plan a trip to Tokyo next week. Find flights, hotels, and check the weather.' + print(f'\nSending sample question: "{sample_question}"') + print("Waiting for response...") + + result = await graph.run(sample_question) + print(f"\nGraph response:\n{result.content}") + + summary = result.metrics + print("\nGraph metrics:") + if summary.duration_ms is not None: + print(f" Duration: {summary.duration_ms}ms") + if summary.success is not None: + print(f" Success: {summary.success}") + if summary.path: + print(f" Path: {' -> '.join(summary.path)}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + + if summary.node_metrics: + print("\nPer-node metrics:") + for node_key, node_summary in summary.node_metrics.items(): + print(f" [{node_key}]") + if node_summary.duration_ms is not None: + print(f" Duration: {node_summary.duration_ms}ms") + if node_summary.success is not None: + print(f" Success: {node_summary.success}") + if node_summary.usage: + print(f" Input tokens: {node_summary.usage.input}") + print(f" Output tokens: {node_summary.usage.output}") + print(f" Total tokens: {node_summary.usage.total}") + if node_summary.tool_calls: + print(f" Tool calls: {', '.join(node_summary.tool_calls)}") + + if result.evaluations is not None: + eval_results = await result.evaluations + + print("\nJudge results:") + for eval_result in eval_results: + print(f"- judge_config_key: {eval_result.judge_config_key}") + print(f" sampled: {eval_result.sampled}") + if not eval_result.sampled: + continue + print(f" success: {eval_result.success}") + print(f" error_message: {eval_result.error_message}") + print(f" metric_key: {eval_result.metric_key}") + print(f" score: {eval_result.score}") + print(f" reasoning: {eval_result.reasoning}") + + except Exception as err: + print("Error:", err) + finally: + # Flush pending events and close the client. 
+ ldclient.get().flush() + ldclient.get().close() + + +def main(): + """Synchronous entry point for Poetry script.""" + asyncio.run(async_main()) + + +if __name__ == "__main__": + main() diff --git a/examples/managed_agent_graph/pyproject.toml b/examples/managed_agent_graph/pyproject.toml new file mode 100644 index 0000000..8eaae37 --- /dev/null +++ b/examples/managed_agent_graph/pyproject.toml @@ -0,0 +1,24 @@ +[tool.poetry] +name = "hello-python-ai-managed-agent-graph" +version = "0.1.0" +description = "Hello LaunchDarkly for Python AI - Managed Agent Graph" +authors = ["LaunchDarkly "] +license = "Apache-2.0" +readme = "README.md" +packages = [{include = "managed_agent_graph_example.py"}] + +[tool.poetry.scripts] +agent-graph = "managed_agent_graph_example:main" + +[tool.poetry.dependencies] +python = "^3.10" +python-dotenv = ">=1.0.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-openai = {version = ">=0.5.0", extras = ["agents"]} +launchdarkly-server-sdk-ai-langchain = {version = ">=0.6.0", extras = ["graph"]} +openai = ">=1.0.0" +langchain-openai = "^1.0.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/examples/openai/README.md b/examples/openai/README.md index 87c94dc..cb81328 100644 --- a/examples/openai/README.md +++ b/examples/openai/README.md @@ -6,22 +6,24 @@ This example demonstrates how to use LaunchDarkly's AI Config with the OpenAI pr - Python 3.10 or higher - [Poetry](https://python-poetry.org/) installed -- A LaunchDarkly account with an [AI Config](https://launchdarkly.com/docs/home/ai-configs/create) created +- A [LaunchDarkly](https://launchdarkly.com/) account and SDK key - An [OpenAI API key](https://platform.openai.com/api-keys) ## Setup +1. Create the following config in your LaunchDarkly project. You can use a different key by setting the environment variable in your `.env`. + + - [Create an AI Config](https://launchdarkly.com/docs/home/ai-configs/create) with an OpenAI model (e.g. `gpt-4`) and a system message. Default key: `sample-completion-config`. + 1. Create a `.env` file in this directory with the following variables: ``` LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key - LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config + LAUNCHDARKLY_AI_CONFIG_KEY=sample-completion-config OPENAI_API_KEY=your-openai-api-key ``` - > `LAUNCHDARKLY_AI_CONFIG_KEY` defaults to `sample-ai-config` if not set. - -2. Install the required dependencies: +1. Install the required dependencies: ```bash poetry install @@ -30,5 +32,5 @@ This example demonstrates how to use LaunchDarkly's AI Config with the OpenAI pr ## Run ```bash -poetry run openai-example +poetry run openai ``` diff --git a/examples/openai/openai_example.py b/examples/openai/openai_example.py index 6520fb4..8d6c2e8 100755 --- a/examples/openai/openai_example.py +++ b/examples/openai/openai_example.py @@ -1,4 +1,5 @@ import os +import logging from dotenv import load_dotenv import ldclient from ldclient import Context @@ -9,13 +10,16 @@ load_dotenv() +logging.basicConfig() +logging.getLogger('ldclient').setLevel(logging.WARNING) + openai_client = OpenAI() # Set sdk_key to your LaunchDarkly SDK key. sdk_key = os.getenv('LAUNCHDARKLY_SDK_KEY') # Set config_key to the AI Config key you want to evaluate. 
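+# Create this AI Config in LaunchDarkly first (see this example's README);
+# the key falls back to 'sample-completion-config' when the variable is unset.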
-ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-ai-config') +ai_config_key = os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'sample-completion-config') def main(): @@ -66,31 +70,38 @@ def main(): return tracker = config_value.create_tracker() - + messages = [message.to_dict() for message in (config_value.messages or [])] - # Add the user input to the conversation - USER_INPUT = "What can you help me with?" - print("User Input:\n", USER_INPUT) - messages.append({'role': 'user', 'content': USER_INPUT}) + SAMPLE_QUESTION = "What can you help me with?" + messages.append({'role': 'user', 'content': SAMPLE_QUESTION}) + + print(f'\nSending sample question to {config_value.model.name}: "{SAMPLE_QUESTION}"') + print("Waiting for response...") - # Track the OpenAI completion with LaunchDarkly metrics using the LD OpenAI provider's extractor completion = tracker.track_metrics_of( + get_ai_metrics_from_response, lambda: openai_client.chat.completions.create( model=config_value.model.name, messages=messages, ), - get_ai_metrics_from_response, ) ai_response = completion.choices[0].message.content - # Add the AI response to the conversation history. messages.append({'role': 'assistant', 'content': ai_response}) - print("AI Response:\n", ai_response) - # Continue the conversation by adding user input to the messages list and invoking the LLM again. - print("Success.") + print(f"\nModel response:\n{ai_response}") + summary = tracker.get_summary() + print("\nDone! The AI config was evaluated and the following metrics were tracked:") + print(f" Duration: {summary.duration_ms}ms") + print(f" Success: {summary.success}") + if summary.usage: + print(f" Input tokens: {summary.usage.input}") + print(f" Output tokens: {summary.usage.output}") + print(f" Total tokens: {summary.usage.total}") + if summary.tool_calls: + print(f" Tool calls: {', '.join(summary.tool_calls)}") # Flush pending events and close the client. ldclient.get().flush() diff --git a/examples/openai/pyproject.toml b/examples/openai/pyproject.toml index 1edae8c..a56bbe5 100644 --- a/examples/openai/pyproject.toml +++ b/examples/openai/pyproject.toml @@ -8,13 +8,13 @@ readme = "README.md" packages = [{include = "openai_example.py"}] [tool.poetry.scripts] -openai-example = "openai_example:main" +openai = "openai_example:main" [tool.poetry.dependencies] python = "^3.10" python-dotenv = ">=1.0.0" -launchdarkly-server-sdk-ai = "^0.18.0" -launchdarkly-server-sdk-ai-openai = "^0.4.0" +launchdarkly-server-sdk-ai = ">=0.19.0" +launchdarkly-server-sdk-ai-openai = ">=0.5.0" openai = ">=1.0.0" [build-system]