@@ -126,105 +126,100 @@ async def process_query(query: str, kb_id: str) -> Dict[str, Any]:
         if not bedrock_runtime:
             raise ValueError('Bedrock client is not initialized')

-        # Process with MCP client
-        logger.info('Connecting to MCP server')
-        async with mcp_client as client:
-            # Get tools from the MCP server
-            logger.info('Getting tools from MCP server')
-            original_tools = client.get_tools()
-            tool_names = [tool.name for tool in original_tools]
-            logger.info(f'Retrieved {len(original_tools)} tools from MCP server: {tool_names}')
-
-            if not original_tools:
-                logger.warning('No tools were returned from the MCP server')
-                return {
-                    'messages': [{'content': 'No tools available from the knowledge base server.'}]
-                }
+        # Get tools from the MCP server
+        logger.info('Getting tools from MCP server')
+        tools = await mcp_client.get_tools()
+        logger.info(
+            f'Retrieved {len(tools)} tools from MCP server: {[tool.name for tool in tools]}'
+        )
+
+        if not tools:
+            logger.warning('No tools were returned from the MCP server')
+            return {
+                'messages': [{'content': 'No tools available from the knowledge base server.'}]
+            }
+
+        # Create a ChatBedrock instance with tools
+        logger.info('Creating ChatBedrock with tools')
+        chat_model = ChatBedrock(
+            client=bedrock_runtime,
+            model_id='anthropic.claude-3-sonnet-20240229-v1:0',
+            model_kwargs={
+                'temperature': 0.7,
+                'max_tokens': 2048,
+                'anthropic_version': 'bedrock-2023-05-31',
+            },
+            streaming=False,
+            system_prompt_with_tools=SYSTEM_PROMPT,
+        )
+
+        # Prepare tools for Bedrock
+        logger.info('Preparing tools for Bedrock')
+        model = chat_model.bind_tools(tools)
+
+        # Start conversation with Bedrock - include KB ID in the message
+        kb_info = f'Use knowledge base ID: {kb_id} for any knowledge base queries.'
+        enhanced_query = f'{kb_info}\n\nUser query: {query}'
+        messages = [HumanMessage(content=enhanced_query)]
+
+        logger.info('Sending initial query to Bedrock')
+        response = await model.ainvoke(
+            messages,
+        )

-            # We'll use the original tools
-            tools = original_tools
-
-            # Create a ChatBedrock instance with tools
-            logger.info('Creating ChatBedrock with tools')
-            chat_model = ChatBedrock(
-                client=bedrock_runtime,
-                model_id='anthropic.claude-3-sonnet-20240229-v1:0',
-                model_kwargs={
-                    'temperature': 0.7,
-                    'max_tokens': 2048,
-                    'anthropic_version': 'bedrock-2023-05-31',
-                },
-                streaming=False,
-                system_prompt_with_tools=SYSTEM_PROMPT,
-            )
-
-            # Prepare tools for Bedrock
-            logger.info('Preparing tools for Bedrock')
-            model = chat_model.bind_tools(tools)
-
-            # Start conversation with Bedrock - include KB ID in the message
-            kb_info = f'Use knowledge base ID: {kb_id} for any knowledge base queries.'
-            enhanced_query = f'{kb_info}\n\nUser query: {query}'
-            messages = [HumanMessage(content=enhanced_query)]
-
-            logger.info('Sending initial query to Bedrock')
-            response = await model.ainvoke(
-                messages,
-            )
-
-            # Check if Bedrock requested a tool
-            if hasattr(response, 'tool_calls') and response.tool_calls:
-                logger.info('Bedrock requested tool use')
-                logger.info(f'Tool calls: {response.tool_calls}')
-
-                for tool_call in response.tool_calls:
-                    tool_name = tool_call['name']
-                    tool_args = tool_call['args']
-                    tool_id = tool_call['id']
-
-                    logger.info(f'Tool requested: {tool_name} with args: {tool_args}')
-
-                    # Find the requested tool
-                    requested_tool = None
-                    for tool in tools:
-                        if tool.name == tool_name:
-                            requested_tool = tool
-                            break
-
-                    if not requested_tool:
-                        logger.warning(f'Requested tool {tool_name} not found')
-                        continue
-
-                    # For query_knowledge_base tool, ensure we use the correct KB ID
-                    if tool_name == 'query_knowledge_base':
-                        # Always override kb_id with the one from the request
-                        tool_args['kb_id'] = kb_id
-
-                    # Execute the tool
-                    logger.info(f'Executing tool {tool_name}')
-                    tool_result = await requested_tool.ainvoke(tool_args)
-                    logger.debug(f'Tool result: {tool_result}')
-
-                    # Create a new conversation with the tool response - use the original query
-                    new_messages = [HumanMessage(content=enhanced_query)]
-                    new_messages.append(response)  # Add the original AI response with tool_calls
-                    new_messages.append(
-                        ToolMessage(
-                            content=str(tool_result),
-                            tool_call_id=tool_id,
-                            name=tool_name,
-                        )
+        # Check if Bedrock requested a tool
+        if hasattr(response, 'tool_calls') and response.tool_calls:
+            logger.info('Bedrock requested tool use')
+            logger.info(f'Tool calls: {response.tool_calls}')
+
+            for tool_call in response.tool_calls:
+                tool_name = tool_call['name']
+                tool_args = tool_call['args']
+                tool_id = tool_call['id']
+
+                logger.info(f'Tool requested: {tool_name} with args: {tool_args}')
+
+                # Find the requested tool
+                requested_tool = None
+                for tool in tools:
+                    if tool.name == tool_name:
+                        requested_tool = tool
+                        break
+
+                if not requested_tool:
+                    logger.warning(f'Requested tool {tool_name} not found')
+                    continue
+
+                # For query_knowledge_base tool, ensure we use the correct KB ID
+                if tool_name == 'query_knowledge_base':
+                    # Always override kb_id with the one from the request
+                    tool_args['kb_id'] = kb_id
+
+                # Execute the tool
+                logger.info(f'Executing tool {tool_name}')
+                tool_result = await requested_tool.ainvoke(tool_args)
+                logger.debug(f'Tool result: {tool_result}')
+
+                # Create a new conversation with the tool response - use the original query
+                new_messages = [HumanMessage(content=enhanced_query)]
+                new_messages.append(response)  # Add the original AI response with tool_calls
+                new_messages.append(
+                    ToolMessage(
+                        content=str(tool_result),
+                        tool_call_id=tool_id,
+                        name=tool_name,
                     )
+                )

-                # Get final response from Bedrock with tool results
-                logger.info('Sending tool results back to Bedrock')
-                final_response = await model.ainvoke(new_messages)
-                response_content = str(final_response.content)
+                # Get final response from Bedrock with tool results
+                logger.info('Sending tool results back to Bedrock')
+                final_response = await model.ainvoke(new_messages)
+                response_content = str(final_response.content)

-                return {'messages': [{'content': response_content}]}
+                return {'messages': [{'content': response_content}]}

-            # If no tool was requested, return the direct response
-            return {'messages': [{'content': response.content}]}
+        # If no tool was requested, return the direct response
+        return {'messages': [{'content': response.content}]}

     except Exception as e:
         logger.error(f'Error in process_query: {str(e)}')
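
For reference, a minimal sketch of how the revised process_query might be exercised end to end. It assumes the module-level mcp_client, bedrock_runtime, and SYSTEM_PROMPT objects referenced in the diff are already initialized elsewhere in the file; the query string and knowledge base ID below are hypothetical placeholders.

import asyncio

async def main() -> None:
    # Hypothetical inputs; substitute a real question and knowledge base ID.
    result = await process_query(
        query='What does the onboarding guide say about VPN access?',
        kb_id='EXAMPLEKB01',
    )
    # process_query returns {'messages': [{'content': ...}]} on both the
    # tool-use and direct-response paths shown in the diff above.
    print(result['messages'][0]['content'])

if __name__ == '__main__':
    asyncio.run(main())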