diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/index.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/index.md
index fe66f2bff..f93f1ba83 100644
--- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/index.md
+++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/index.md
@@ -18,4 +18,5 @@ local-llms-ollama-litellm
 instrumenting
 topic-subscription-scenarios
 structured-output-agent
+llm-usage-logger
 ```
diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llm-usage-logger.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llm-usage-logger.ipynb
new file mode 100644
index 000000000..dbac505ca
--- /dev/null
+++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/llm-usage-logger.ipynb
@@ -0,0 +1,128 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tracking LLM usage with a logger\n",
+    "\n",
+    "The model clients included in AutoGen emit structured events that can be used to track model usage. This notebook demonstrates how to use a custom logging handler to aggregate token usage from these events.\n",
+    "\n",
+    "These events are logged to the logger with the name {py:attr}`autogen_core.application.logging.EVENT_LOGGER_NAME`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import logging\n",
+    "\n",
+    "from autogen_core.application.logging.events import LLMCallEvent\n",
+    "\n",
+    "\n",
+    "class LLMUsageTracker(logging.Handler):\n",
+    "    def __init__(self) -> None:\n",
+    "        \"\"\"Logging handler that tracks the number of tokens used in the prompt and completion.\n",
+    "\n",
+    "        Example:\n",
+    "\n",
+    "            .. code-block:: python\n",
+    "\n",
+    "                from autogen_core.application.logging import EVENT_LOGGER_NAME\n",
+    "\n",
+    "                # Set up the logging configuration to use the custom handler\n",
+    "                logger = logging.getLogger(EVENT_LOGGER_NAME)\n",
+    "                logger.setLevel(logging.INFO)\n",
+    "                llm_usage = LLMUsageTracker()\n",
+    "                logger.handlers = [llm_usage]\n",
+    "\n",
+    "                # ...\n",
+    "\n",
+    "                print(llm_usage.prompt_tokens)\n",
+    "                print(llm_usage.completion_tokens)\n",
+    "\n",
+    "        \"\"\"\n",
+    "        super().__init__()\n",
+    "        self._prompt_tokens = 0\n",
+    "        self._completion_tokens = 0\n",
+    "\n",
+    "    @property\n",
+    "    def tokens(self) -> int:\n",
+    "        return self._prompt_tokens + self._completion_tokens\n",
+    "\n",
+    "    @property\n",
+    "    def prompt_tokens(self) -> int:\n",
+    "        return self._prompt_tokens\n",
+    "\n",
+    "    @property\n",
+    "    def completion_tokens(self) -> int:\n",
+    "        return self._completion_tokens\n",
+    "\n",
+    "    def reset(self) -> None:\n",
+    "        self._prompt_tokens = 0\n",
+    "        self._completion_tokens = 0\n",
+    "\n",
+    "    def emit(self, record: logging.LogRecord) -> None:\n",
+    "        \"\"\"Emit the log record. To be used by the logging module.\"\"\"\n",
+    "        try:\n",
+    "            # Only count usage from structured LLMCallEvent messages\n",
+    "            if isinstance(record.msg, LLMCallEvent):\n",
+    "                event = record.msg\n",
+    "                self._prompt_tokens += event.prompt_tokens\n",
+    "                self._completion_tokens += event.completion_tokens\n",
+    "        except Exception:\n",
+    "            self.handleError(record)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then, this handler can be attached to the event logger like any other Python logging handler, and the token counts read after the model client has been run."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen_core.application.logging import EVENT_LOGGER_NAME\n",
+    "\n",
+    "# Set up the logging configuration to use the custom handler\n",
+    "logger = logging.getLogger(EVENT_LOGGER_NAME)\n",
+    "logger.setLevel(logging.INFO)\n",
+    "llm_usage = LLMUsageTracker()\n",
+    "logger.handlers = [llm_usage]\n",
+    "\n",
+    "# client.create(...)\n",
+    "\n",
+    "print(llm_usage.prompt_tokens)\n",
+    "print(llm_usage.completion_tokens)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py b/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py
index 1a5f08054..f5020fea8 100644
--- a/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py
+++ b/python/packages/autogen-core/src/autogen_core/application/logging/__init__.py
@@ -1,5 +1,3 @@
-from ._llm_usage import LLMUsageTracker
-
 ROOT_LOGGER_NAME = "autogen_core"
 """str: Logger name used for structured event logging"""
 
@@ -14,5 +12,4 @@ __all__ = [
     "ROOT_LOGGER_NAME",
     "EVENT_LOGGER_NAME",
     "TRACE_LOGGER_NAME",
-    "LLMUsageTracker",
 ]
diff --git a/python/packages/autogen-core/src/autogen_core/application/logging/_llm_usage.py b/python/packages/autogen-core/src/autogen_core/application/logging/_llm_usage.py
deleted file mode 100644
index 593739a23..000000000
--- a/python/packages/autogen-core/src/autogen_core/application/logging/_llm_usage.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import logging
-
-from .events import LLMCallEvent
-
-
-class LLMUsageTracker(logging.Handler):
-    def __init__(self) -> None:
-        """Logging handler that tracks the number of tokens used in the prompt and completion.
-
-        Example:
-
-            .. code-block:: python
-
-                from autogen_core.application.logging import LLMUsageTracker, EVENT_LOGGER_NAME
-
-                # Set up the logging configuration to use the custom handler
-                logger = logging.getLogger(EVENT_LOGGER_NAME)
-                logger.setLevel(logging.INFO)
-                llm_usage = LLMUsageTracker()
-                logger.handlers = [llm_usage]
-
-                # ...
-
-                print(llm_usage.prompt_tokens)
-                print(llm_usage.completion_tokens)
-
-        """
-        super().__init__()
-        self._prompt_tokens = 0
-        self._completion_tokens = 0
-
-    @property
-    def tokens(self) -> int:
-        return self._prompt_tokens + self._completion_tokens
-
-    @property
-    def prompt_tokens(self) -> int:
-        return self._prompt_tokens
-
-    @property
-    def completion_tokens(self) -> int:
-        return self._completion_tokens
-
-    def reset(self) -> None:
-        self._prompt_tokens = 0
-        self._completion_tokens = 0
-
-    def emit(self, record: logging.LogRecord) -> None:
-        """Emit the log record. To be used by the logging module."""
-        try:
-            # Use the StructuredMessage if the message is an instance of it
-            if isinstance(record.msg, LLMCallEvent):
-                event = record.msg
-                self._prompt_tokens += event.prompt_tokens
-                self._completion_tokens += event.completion_tokens
-        except Exception:
-            self.handleError(record)
diff --git a/python/packages/autogen-core/tests/test_llm_usage.py b/python/packages/autogen-core/tests/test_llm_usage.py
deleted file mode 100644
index 9f8586692..000000000
--- a/python/packages/autogen-core/tests/test_llm_usage.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import logging
-
-from autogen_core.application.logging import EVENT_LOGGER_NAME, LLMUsageTracker
-from autogen_core.application.logging.events import LLMCallEvent
-
-
-def test_llm_usage() -> None:
-    # Set up the logging configuration to use the custom handler
-    logger = logging.getLogger(EVENT_LOGGER_NAME)
-    logger.setLevel(logging.INFO)
-    llm_usage = LLMUsageTracker()
-    logger.handlers = [llm_usage]
-
-    logger.info(LLMCallEvent(prompt_tokens=10, completion_tokens=20))
-
-    assert llm_usage.prompt_tokens == 10
-    assert llm_usage.completion_tokens == 20
-
-    logger.info(LLMCallEvent(prompt_tokens=1, completion_tokens=1))
-
-    assert llm_usage.prompt_tokens == 11
-    assert llm_usage.completion_tokens == 21
-
-    llm_usage.reset()
-
-    assert llm_usage.prompt_tokens == 0
-    assert llm_usage.completion_tokens == 0
-
-    logger.info(LLMCallEvent(prompt_tokens=1, completion_tokens=1))
-
-    assert llm_usage.prompt_tokens == 1
-    assert llm_usage.completion_tokens == 1
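
Taken together, the patch turns token tracking into a cookbook recipe rather than a library API: a logging.Handler attached to EVENT_LOGGER_NAME sums the prompt_tokens and completion_tokens carried by each LLMCallEvent. Below is a minimal, self-contained sketch of that pattern for anyone trying the change locally. It assumes only the autogen_core.application.logging modules shown in this diff; the condensed UsageTotals handler is an illustrative stand-in for the notebook's LLMUsageTracker, and the directly logged event stands in for a real model-client call (the constructor arguments mirror the removed test_llm_usage.py).

import logging

from autogen_core.application.logging import EVENT_LOGGER_NAME
from autogen_core.application.logging.events import LLMCallEvent


class UsageTotals(logging.Handler):
    """Condensed stand-in for the cookbook's LLMUsageTracker: sums token counts from LLMCallEvent records."""

    def __init__(self) -> None:
        super().__init__()
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def emit(self, record: logging.LogRecord) -> None:
        try:
            # Only LLMCallEvent records carry token counts; ignore everything else.
            if isinstance(record.msg, LLMCallEvent):
                self.prompt_tokens += record.msg.prompt_tokens
                self.completion_tokens += record.msg.completion_tokens
        except Exception:
            self.handleError(record)


# Attach the handler to the event logger that the model clients log to.
usage = UsageTotals()
logger = logging.getLogger(EVENT_LOGGER_NAME)
logger.setLevel(logging.INFO)
logger.addHandler(usage)

# In real use a model client call (e.g. client.create(...)) emits the event;
# logging one directly mirrors the removed unit test.
logger.info(LLMCallEvent(prompt_tokens=10, completion_tokens=20))

print(usage.prompt_tokens, usage.completion_tokens)  # 10 20

Because the totals live on the handler instance, several trackers can be attached side by side and cleared independently, which is the behaviour the deleted test exercised through reset().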