2 changes: 1 addition & 1 deletion models/bedrock/manifest.yaml
@@ -1,4 +1,4 @@
-version: 0.0.51
+version: 0.0.52
type: plugin
author: langgenius
name: bedrock
27 changes: 24 additions & 3 deletions models/bedrock/models/llm/llm.py
@@ -382,7 +382,8 @@ def _generate_with_converse(
prompt_messages,
model_id=cache_config_model_id,
system_cache_checkpoint=system_cache_checkpoint,
-latest_two_messages_cache_checkpoint=latest_two_messages_cache_checkpoint
+latest_two_messages_cache_checkpoint=latest_two_messages_cache_checkpoint,
+tools=tools
)
inference_config, additional_model_fields = self._convert_converse_api_model_parameters(model_parameters, stop)

@@ -425,7 +426,7 @@ def _generate_with_converse(
)
placeholder_tool = PromptMessageTool(
name="__no_more_tools_available__",
description="This is a placeholder tool. No more tools are available for this conversation. Please provide a final answer based on the information already gathered.",
description="No tools are currently available for use. Please respond without using any tools.",
parameters={
"type": "object",
"properties": {},
@@ -774,7 +775,8 @@ def _convert_converse_api_model_parameters(
return inference_config, additional_model_fields

def _convert_converse_prompt_messages(self, prompt_messages: list[PromptMessage], model_id: str = None,
-system_cache_checkpoint: bool = True, latest_two_messages_cache_checkpoint: bool = False) -> tuple[list, list[dict]]:
+system_cache_checkpoint: bool = True, latest_two_messages_cache_checkpoint: bool = False,
+tools: Optional[list[PromptMessageTool]] = None) -> tuple[list, list[dict]]:
"""
Convert prompt messages to dict list and system
Add cache points for supported models when enable_cache is True
@@ -811,6 +813,25 @@ def _convert_converse_prompt_messages(self, prompt_messages: list[PromptMessage]
message_dict = self._convert_prompt_message_to_dict(message)
prompt_message_dicts.append(message_dict)

# Check if we're in the final iteration of function calling.
# This is specifically for Bedrock/Claude which requires toolConfig when messages contain tool use.
has_tool_history = any(
    isinstance(msg, ToolPromptMessage) or (isinstance(msg, AssistantPromptMessage) and msg.tool_calls)
    for msg in prompt_messages
)
is_final_iteration = not tools and has_tool_history
# If this is the final iteration, append an instruction for the user.
if is_final_iteration:
    final_instruction_text = (
        "IMPORTANT: DO NOT call any more tools. Summarize the information you have gathered from previous tool calls."
Contributor:

This final prompt may not meet the requirements in every situation, and it is not transparent to the end user.

Contributor Author (@noproblem520), Nov 18, 2025:

The Dify workflow agent node fails to generate a final summary in the last round if the LLM isn't explicitly prompted to summarize. The last round in the agent returns a message like "There is not a tool named __no_more_tools_available__.", which results in the workflow returning a truncated answer to the user's question, e.g. "Hi, I will help you figure out the problem:".

Our logic only adds the final prompt when toolUse and toolResult are present in the message history and the flow is in the final round, so I believe this implementation should not affect any other cases.

I've validated this code in production, and it's working perfectly. If you have any concerns, please let me know.

    )
    final_instruction_message = {
        "role": "user",
        "content": [{"text": final_instruction_text}]
    }
    # Append to the end of prompt_message_dicts
    prompt_message_dicts.append(final_instruction_message)

# Only add cache point to messages if supported and latest_two_messages_cache_checkpoint is enabled
if cache_config and "messages" in cache_config["supported_fields"] and latest_two_messages_cache_checkpoint:
# Find all user messages
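
For reference, here is a minimal standalone sketch of the final-iteration check added to llm.py, runnable outside the plugin. The dataclasses and the append_final_instruction helper are stand-ins invented for this sketch; the real code operates on dify_plugin's AssistantPromptMessage and ToolPromptMessage inside _convert_converse_prompt_messages.

# Minimal sketch of the final-iteration check; class and function names here are
# stand-ins for illustration only, not the actual dify_plugin API.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class AssistantMessage:  # stand-in for dify_plugin's AssistantPromptMessage
    content: str = ""
    tool_calls: list = field(default_factory=list)


@dataclass
class ToolMessage:  # stand-in for dify_plugin's ToolPromptMessage
    content: str = ""


def append_final_instruction(prompt_message_dicts: list, prompt_messages: list,
                             tools: Optional[list]) -> list:
    """Append a summarize-now user message when the history already contains tool
    use/results but no tools are offered for the current round (final iteration)."""
    has_tool_history = any(
        isinstance(msg, ToolMessage) or (isinstance(msg, AssistantMessage) and msg.tool_calls)
        for msg in prompt_messages
    )
    if not tools and has_tool_history:
        prompt_message_dicts.append({
            "role": "user",
            "content": [{"text": "IMPORTANT: DO NOT call any more tools. "
                                 "Summarize the information you have gathered from previous tool calls."}],
        })
    return prompt_message_dicts


# Final round: the history holds a tool call and its result, and no tools are offered,
# so the extra user message is appended; earlier rounds (tools still offered) are untouched.
history = [AssistantMessage(tool_calls=[{"name": "search"}]), ToolMessage(content="result")]
print(append_final_instruction([], history, tools=None)[-1]["role"])  # -> user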