From fe38faeec2359b0cbb9d9a86d58363f1e39bfe07 Mon Sep 17 00:00:00 2001
From: Qingyun Wu
Date: Fri, 8 Sep 2023 11:14:06 -0400
Subject: [PATCH] update autogen doc link

---
 flaml/autogen/oai/completion.py                          | 2 +-
 website/blog/2023-04-21-LLM-tuning-math/index.mdx        | 2 +-
 website/blog/2023-05-07-1M-milestone/index.mdx           | 2 +-
 website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx | 4 ++--
 website/blog/2023-06-28-MathChat/index.mdx               | 2 +-
 website/blog/2023-07-14-Local-LLMs/index.mdx             | 2 +-
 website/docs/Examples/AutoGen-AgentChat.md               | 2 +-
 website/docs/Examples/AutoGen-OpenAI.md                  | 2 +-
 website/docs/Getting-Started.md                          | 4 ++--
 9 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/flaml/autogen/oai/completion.py b/flaml/autogen/oai/completion.py
index 0433c7363..2bb6d81e2 100644
--- a/flaml/autogen/oai/completion.py
+++ b/flaml/autogen/oai/completion.py
@@ -697,7 +697,7 @@ class Completion(openai_Completion):
                 E.g., `prompt="Complete the following sentence: {prefix}", context={"prefix": "Today I feel"}`.
                 The actual prompt will be:
                 "Complete the following sentence: Today I feel".
-                More examples can be found at [templating](/docs/Use-Cases/Autogen#templating).
+                More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating).
             use_cache (bool, Optional): Whether to use cached responses.
             config_list (List, Optional): List of configurations for the completion to try.
                 The first one that does not raise an error will be used.
diff --git a/website/blog/2023-04-21-LLM-tuning-math/index.mdx b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
index 2fdb79533..c8d01d1a4 100644
--- a/website/blog/2023-04-21-LLM-tuning-math/index.mdx
+++ b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
@@ -69,6 +69,6 @@ The need for model selection, parameter tuning and cost saving is not specific t
 ## For Further Reading
 
 * [Research paper about the tuning technique](https://arxiv.org/abs/2303.04673)
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
+* [Documentation about `flaml.autogen`](https://microsoft.github.io/autogen/)
 
 *Do you have any experience to share about LLM applications? Would you like to see more support or research of LLM optimization or automation? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
diff --git a/website/blog/2023-05-07-1M-milestone/index.mdx b/website/blog/2023-05-07-1M-milestone/index.mdx
index 3109e4f60..c0a53bceb 100644
--- a/website/blog/2023-05-07-1M-milestone/index.mdx
+++ b/website/blog/2023-05-07-1M-milestone/index.mdx
@@ -37,7 +37,7 @@ We invite contributions from anyone interested in this topic and look forward to
 
 ## For Further Reading
 
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
+* [Documentation about `flaml.autogen`](https://microsoft.github.io/autogen/)
 * [Code Example: Tune chatGPT for Math Problem Solving with FLAML](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb)
 
 *Do you have any experience to share about LLM applications? Would you like to see more support or research of LLMOps? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
diff --git a/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx b/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
index 12e2bd670..0c0500650 100644
--- a/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
+++ b/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
@@ -144,7 +144,7 @@ An example notebook to run this experiment can be found at: https://github.com/m
 
 ## Discussion
 
-Our solution is quite simple to [implement](/docs/reference/autogen/code_utils#implement) using a generic interface offered in [`flaml.autogen`](/docs/Use-Cases/Autogen#logic-error), yet the result is quite encouraging.
+Our solution is quite simple to implement using a generic interface offered in [`flaml.autogen`](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#logic-error), yet the result is quite encouraging.
 
 While the specific way of generating assertions is application-specific, the main ideas are general in LLM operations:
 * Generate multiple responses to select - especially useful when selecting a good response is relatively easier than generating a good response in one shot.
@@ -164,5 +164,5 @@ There are many directions for extension in research and development:
 
 ## For Further Reading
 
-* [Documentation](/docs/Use-Cases/Autogen) about `flaml.autogen` and [Research paper](https://arxiv.org/abs/2303.04673).
+* [Documentation](https://microsoft.github.io/autogen/) about `flaml.autogen` and [Research paper](https://arxiv.org/abs/2303.04673).
 * [Blog post](/blog/2023/04/21/LLM-tuning-math) about a related study for math.
diff --git a/website/blog/2023-06-28-MathChat/index.mdx b/website/blog/2023-06-28-MathChat/index.mdx
index d94075d0f..ce4b945a9 100644
--- a/website/blog/2023-06-28-MathChat/index.mdx
+++ b/website/blog/2023-06-28-MathChat/index.mdx
@@ -89,6 +89,6 @@ Further work can be done to enhance this framework or math problem-solving in ge
 ## For Further Reading
 
 * [Research paper of MathChat](https://arxiv.org/abs/2306.01337)
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
+* [Documentation about `flaml.autogen`](https://microsoft.github.io/autogen/)
 
 *Are you working on applications that involve math problem-solving? Would you appreciate additional research or support on the application of LLM-based agents for math problem-solving? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
diff --git a/website/blog/2023-07-14-Local-LLMs/index.mdx b/website/blog/2023-07-14-Local-LLMs/index.mdx
index 3f04b6d18..059205749 100644
--- a/website/blog/2023-07-14-Local-LLMs/index.mdx
+++ b/website/blog/2023-07-14-Local-LLMs/index.mdx
@@ -143,5 +143,5 @@ print(response)
 
 ## For Further Reading
 
-* [Documentation](/docs/Use-Cases/Autogen) about `flaml.autogen`
+* [Documentation](https://microsoft.github.io/autogen/) about `flaml.autogen`
 * [Documentation](https://github.com/lm-sys/FastChat) about FastChat.
diff --git a/website/docs/Examples/AutoGen-AgentChat.md b/website/docs/Examples/AutoGen-AgentChat.md
index 632053ddf..49d27fd42 100644
--- a/website/docs/Examples/AutoGen-AgentChat.md
+++ b/website/docs/Examples/AutoGen-AgentChat.md
@@ -2,7 +2,7 @@
 
 `flaml.autogen` offers conversable agents powered by LLMs, tools, or humans, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation via multi-agent conversation.
 
-Please find documentation about this feature [here](/docs/Use-Cases/Autogen#agents).
+Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).
 
 Links to notebook examples:
 * [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)
diff --git a/website/docs/Examples/AutoGen-OpenAI.md b/website/docs/Examples/AutoGen-OpenAI.md
index f86b2328f..fc93cbf98 100644
--- a/website/docs/Examples/AutoGen-OpenAI.md
+++ b/website/docs/Examples/AutoGen-OpenAI.md
@@ -1,7 +1,7 @@
 # AutoGen - Tune GPT Models
 
 `flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The research study finds that tuning hyperparameters can significantly improve their utility.
-Please find documentation about this feature [here](/docs/Use-Cases/Autogen#enhanced-inference).
+Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference).
 
 Links to notebook examples:
 * [Optimize for Code Generation](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index a8889d711..69da828a1 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -20,7 +20,7 @@ Install FLAML from pip: `pip install flaml`. Find more options in [Installation]
 
 There are several ways of using flaml:
 
-#### (New) [Autogen](/docs/Use-Cases/Autogen)
+#### (New) [Autogen](https://microsoft.github.io/autogen/)
 
 Autogen enables the next-gen GPT-X applications with a generic multi-agent conversation framework. It offers customizable and conversable agents which integrate LLMs, tools, and humans.
 
@@ -118,7 +118,7 @@ Then, you can use it just like you use the original `LGBMClassifier`. Your other
 
 ### Where to Go Next?
 
-* Understand the use cases for [Autogen](/docs/Use-Cases/Autogen), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
+* Understand the use cases for [Autogen](https://microsoft.github.io/autogen/), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
 * Find code examples under "Examples": from [AutoGen - AgentChat](/docs/Examples/AutoGen-AgentChat) to [Tune - PyTorch](/docs/Examples/Tune-PyTorch).
 * Learn about [research](/docs/Research) around FLAML and check [blogposts](/blog).
 * Chat on [Discord](https://discord.gg/Cppx2vSPVP).
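
Below are a few short usage sketches for the `flaml.autogen` APIs whose documentation links this patch updates; they are illustrative notes for reviewers, not part of the diff. First, the prompt templating described in the `completion.py` docstring hunk above: a minimal sketch, assuming an OpenAI API key is configured in the environment, with a placeholder model name.

```python
from flaml import autogen

# "{prefix}" in the prompt template is filled in from `context` at request
# time, so the actual prompt sent to the model becomes
# "Complete the following sentence: Today I feel".
response = autogen.oai.Completion.create(
    config_list=[{"model": "gpt-3.5-turbo"}],  # placeholder config; supply your own
    prompt="Complete the following sentence: {prefix}",
    context={"prefix": "Today I feel"},
)
print(autogen.oai.Completion.extract_text(response)[0])
```

The `context` dict supplies a value for every field in the template, exactly as the docstring's own example describes.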
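
The conversable agents mentioned in the `AutoGen-AgentChat.md` hunk can be sketched in a few lines, again assuming a valid `config_list`; the task message is a made-up example.

```python
from flaml import autogen

# Placeholder LLM configuration; replace with a real config_list and API key.
llm_config = {"config_list": [{"model": "gpt-4"}]}

# An LLM-powered assistant and a user proxy that can execute generated code;
# the two agents solve the task through automated back-and-forth chat.
assistant = autogen.AssistantAgent("assistant", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",  # fully automated; set to "ALWAYS" to participate
    code_execution_config={"work_dir": "coding"},
)
user_proxy.initiate_chat(assistant, message="Plot a chart of 1, 2, 4, 8, 16.")
```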
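
Finally, a rough sketch of the EcoOptiGen-style hyperparameter tuning referenced in the `AutoGen-OpenAI.md` hunk. The data, metric, and budgets here are invented placeholders; see the linked `autogen_openai_completion.ipynb` notebook for a complete, tested setup.

```python
from flaml import autogen

# Invented toy tuning data; each record's keys fill the prompt template and
# are also passed to eval_func as keyword arguments.
tune_data = [
    {"question": "What is 2 + 2?", "answer": "4"},
    {"question": "What is 3 * 5?", "answer": "15"},
]

def eval_func(responses, answer, **kwargs):
    # Hypothetical metric: whether any sampled response contains the answer.
    return {"success": any(answer in r for r in responses)}

config, analysis = autogen.oai.Completion.tune(
    data=tune_data,
    metric="success",
    mode="max",
    eval_func=eval_func,
    inference_budget=0.02,   # average inference budget per instance ($)
    optimization_budget=1,   # total budget for the tuning process ($)
    num_samples=20,          # number of hyperparameter configurations to try
    prompt="{question}",     # prompt template instantiated from each record
)
print(config)  # the best inference configuration found within budget
```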