diff --git a/units/en/unit2/llama-index/agents.mdx b/units/en/unit2/llama-index/agents.mdx
index 927a1645..65661f2f 100644
--- a/units/en/unit2/llama-index/agents.mdx
+++ b/units/en/unit2/llama-index/agents.mdx
@@ -42,7 +42,11 @@ def multiply(a: int, b: int) -> int:
     return a * b
 
 # initialize llm
-llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
+# Check available providers: https://huggingface.co/inference/models
+llm = HuggingFaceInferenceAPI(
+    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="auto"
+)
 
 # initialize agent
 agent = AgentWorkflow.from_tools_or_functions(
diff --git a/units/en/unit2/llama-index/components.mdx b/units/en/unit2/llama-index/components.mdx
index 29549831..be211574 100644
--- a/units/en/unit2/llama-index/components.mdx
+++ b/units/en/unit2/llama-index/components.mdx
@@ -159,7 +159,12 @@ We also pass in an LLM to the query engine to use for the response.
 ```python
 from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
-llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
+# Check available providers: https://huggingface.co/inference/models
+llm = HuggingFaceInferenceAPI(
+    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="auto"
+)
+
 query_engine = index.as_query_engine(
     llm=llm,
     response_mode="tree_summarize",
diff --git a/units/en/unit2/llama-index/workflows.mdx b/units/en/unit2/llama-index/workflows.mdx
index 70926e40..462b07d3 100644
--- a/units/en/unit2/llama-index/workflows.mdx
+++ b/units/en/unit2/llama-index/workflows.mdx
@@ -201,7 +201,11 @@ def multiply(a: int, b: int) -> int:
     """Multiply two numbers."""
     return a * b
 
-llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
+# Check available providers: https://huggingface.co/inference/models
+llm = HuggingFaceInferenceAPI(
+    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="auto"
+)
 
 # we can pass functions directly without FunctionTool -- the fn/docstring are parsed for the name/description
 multiply_agent = ReActAgent(
diff --git a/units/en/unit3/agentic-rag/agent.mdx b/units/en/unit3/agentic-rag/agent.mdx
index 396db87e..8278663d 100644
--- a/units/en/unit3/agentic-rag/agent.mdx
+++ b/units/en/unit3/agentic-rag/agent.mdx
@@ -69,7 +69,11 @@ Now, let's combine all these tools into a single agent:
 
 ```python
 # Initialize the Hugging Face model
-llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
+# Check available providers: https://huggingface.co/inference/models
+llm = HuggingFaceInferenceAPI(
+    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="auto"
+)
 
 # Create Alfred with all the tools
 alfred = AgentWorkflow.from_tools_or_functions(
@@ -90,7 +94,8 @@ from langgraph.graph import START, StateGraph
 from langgraph.prebuilt import tools_condition
 from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 
-from tools import DuckDuckGoSearchRun, weather_info_tool, hub_stats_tool
+from langchain_community.tools import DuckDuckGoSearchRun
+from tools import weather_info_tool, hub_stats_tool
 from retriever import guest_info_tool
 ```