|
29 | 29 | from wayflowcore.models.llmmodel import LlmModel |
30 | 30 | from wayflowcore.models.llmmodelfactory import LlmModelFactory |
31 | 31 | from wayflowcore.models.vllmmodel import VllmModel |
32 | | -from wayflowcore.property import IntegerProperty, StringProperty |
| 32 | +from wayflowcore.property import BooleanProperty, IntegerProperty, StringProperty |
33 | 33 | from wayflowcore.steps import ( |
34 | 34 | AgentExecutionStep, |
35 | 35 | InputMessageStep, |
@@ -1618,3 +1618,145 @@ async def test_agent_can_run_async(remotely_hosted_llm): |
1618 | 1618 | conversation = agent.start_conversation() |
1619 | 1619 | status = await conversation.execute_async() |
1620 | 1620 | assert isinstance(status, UserMessageRequestStatus) |
| 1621 | + |
| 1622 | + |
| 1623 | +def test_error_on_caller_input_mode_never_with_initial_message(big_llama): |
| 1624 | + with pytest.raises( |
| 1625 | + ValueError, match="The caller input mode for the agent is set to `CallerInputMode.NEVER`" |
| 1626 | + ): |
| 1627 | + Agent( |
| 1628 | + llm=big_llama, |
| 1629 | + caller_input_mode=CallerInputMode.NEVER, |
| 1630 | + initial_message="Hi, what's your name?", |
| 1631 | + ) |
| 1632 | + |
| 1633 | + |
| 1634 | +def _get_haiku_tool( |
| 1635 | + submitted_haikus: List[str], |
| 1636 | + success_message: str = "Haiku Submitted Successfully.", |
| 1637 | +): |
| 1638 | + @tool(description_mode="only_docstring") |
| 1639 | + def submit_haiku(haiku: str) -> str: |
| 1640 | + """Submit your haiku. |
| 1641 | +
| 1642 | + Parameters |
| 1643 | + ---------- |
| 1644 | + haiku : |
| 1645 | + the full haiku (all three verses of it) |
| 1646 | +
| 1647 | + Returns |
| 1648 | + ------- |
| 1649 | + A status code |
| 1650 | + """ |
| 1651 | + submitted_haikus.append(haiku) |
| 1652 | + return success_message |
| 1653 | + |
| 1654 | + return submit_haiku |
| 1655 | + |
| 1656 | + |
| 1657 | +@retry_test(max_attempts=3) |
| 1658 | +@pytest.mark.parametrize("can_finish_conversation", [True]) |
| 1659 | +def test_caller_input_mode_never(big_llama, can_finish_conversation): |
| 1660 | + """ |
| 1661 | + Failure rate: 0 out of 20 |
| 1662 | + Observed on: 2025-07-28 |
| 1663 | + Average success time: 4.20 seconds per successful attempt |
| 1664 | + Average failure time: No time measurement |
| 1665 | + Max attempt: 3 |
| 1666 | + Justification: (0.05 ** 3) ~= 9.4 / 100'000 |
| 1667 | + """ |
| 1668 | + |
| 1669 | + submitted_haikus = [] |
| 1670 | + |
| 1671 | + agent = Agent( |
| 1672 | + llm=big_llama, |
| 1673 | + tools=[_get_haiku_tool(submitted_haikus)], |
| 1674 | + caller_input_mode=CallerInputMode.NEVER, |
| 1675 | + custom_instruction="You are a helpful assistant, who always uses the appropriate tool to submit a single Haiku, and then finishes the conversation.", |
| 1676 | + initial_message=None, |
| 1677 | + can_finish_conversation=can_finish_conversation, |
| 1678 | + max_iterations=5, |
| 1679 | + ) |
| 1680 | + |
| 1681 | + conv = agent.start_conversation() |
| 1682 | + conv.execute() |
| 1683 | + assert len(submitted_haikus) == 1 |
| 1684 | + |
| 1685 | + |
| 1686 | +@retry_test(max_attempts=3) |
| 1687 | +@pytest.mark.parametrize("can_finish_conversation", [True, False]) |
| 1688 | +def test_caller_input_mode_never_with_agent_template(big_llama, can_finish_conversation): |
| 1689 | + """ |
| 1690 | + Failure rate: 0 out of 20 |
| 1691 | + Observed on: 2025-07-28 |
| 1692 | + Average success time: 4.20 seconds per successful attempt |
| 1693 | + Average failure time: No time measurement |
| 1694 | + Max attempt: 3 |
| 1695 | + Justification: (0.05 ** 3) ~= 9.4 / 100'000 |
| 1696 | + """ |
| 1697 | + submitted_haikus = [] |
| 1698 | + |
| 1699 | + agent = Agent( |
| 1700 | + llm=big_llama, |
| 1701 | + tools=[_get_haiku_tool(submitted_haikus)], |
| 1702 | + caller_input_mode=CallerInputMode.NEVER, |
| 1703 | + initial_message=None, |
| 1704 | + custom_instruction="You are a helpful assistant, who always uses the appropriate tool to submit a single Haiku, and then finishes the conversation.", |
| 1705 | + agent_template=PromptTemplate( |
| 1706 | + messages=[ |
| 1707 | + Message("{{user_input}}", MessageType.USER), |
| 1708 | + PromptTemplate.CHAT_HISTORY_PLACEHOLDER, |
| 1709 | + ], |
| 1710 | + ), |
| 1711 | + output_descriptors=[ |
| 1712 | + BooleanProperty( |
| 1713 | + "haiku_submitted", |
| 1714 | + description="true if the haiku was successfully submitted", |
| 1715 | + default_value=False, |
| 1716 | + ) |
| 1717 | + ], |
| 1718 | + can_finish_conversation=can_finish_conversation, |
| 1719 | + max_iterations=5, |
| 1720 | + ) |
| 1721 | + |
| 1722 | + conv = agent.start_conversation(inputs={"user_input": "I want my haiku to be about trees"}) |
| 1723 | + status = conv.execute() |
| 1724 | + assert isinstance(status, FinishedStatus) |
| 1725 | + assert status.output_values["haiku_submitted"] |
| 1726 | + assert len(submitted_haikus) == 1 |
| 1727 | + |
| 1728 | + |
| 1729 | +@retry_test(max_attempts=3) |
| 1730 | +def test_caller_input_mode_never_with_single_iteration(big_llama): |
| 1731 | + """ |
| 1732 | + Failure rate: 0 out of 20 |
| 1733 | + Observed on: 2025-07-28 |
| 1734 | + Average success time: 0.87 seconds per successful attempt |
| 1735 | + Average failure time: No time measurement |
| 1736 | + Max attempt: 3 |
| 1737 | + Justification: (0.05 ** 3) ~= 9.4 / 100'000 |
| 1738 | + """ |
| 1739 | + submitted_haikus = [] |
| 1740 | + |
| 1741 | + with pytest.warns( |
| 1742 | + UserWarning, match="Maximum number of iterations is set to one for the Agent.*" |
| 1743 | + ): |
| 1744 | + agent = Agent( |
| 1745 | + llm=big_llama, |
| 1746 | + tools=[_get_haiku_tool(submitted_haikus)], |
| 1747 | + caller_input_mode=CallerInputMode.NEVER, |
| 1748 | + custom_instruction="You are a helpful assistant, who always uses the appropriate tool to submit a single Haiku, and then finishes the conversation.", |
| 1749 | + output_descriptors=[ |
| 1750 | + BooleanProperty( |
| 1751 | + "haiku_submitted", "true if the haiku was submitted", default_value=False |
| 1752 | + ) |
| 1753 | + ], |
| 1754 | + initial_message=None, |
| 1755 | + can_finish_conversation=False, |
| 1756 | + max_iterations=1, |
| 1757 | + ) |
| 1758 | + |
| 1759 | + conv = agent.start_conversation() |
| 1760 | + status = conv.execute() |
| 1761 | + assert isinstance(status, FinishedStatus) |
| 1762 | + assert not status.output_values["haiku_submitted"] |
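
A note on the `Justification` line repeated in the retry docstrings above: read literally, `0.05 ** 3` would be 12.5 / 100'000, not 9.4. The quoted figure is reproduced if one assumes the per-attempt failure rate is the Laplace-smoothed estimate `(failures + 1) / (observations + 2)` (here 1/22 ≈ 0.045, displayed rounded as 0.05) and the justification is the probability that all retry attempts fail. The snippet below is a sketch under that assumption; it is not part of the test suite.

```python
# Reproduce the "Justification" figure from the retry_test docstrings,
# assuming a Laplace-smoothed failure-rate estimate (an assumption, not
# something stated in the diff itself).

failures, observations = 0, 20   # "Failure rate: 0 out of 20"
max_attempts = 3                 # "Max attempt: 3"

# Rule-of-succession estimate of the per-attempt failure rate: (0 + 1) / (20 + 2)
failure_rate = (failures + 1) / (observations + 2)

# Probability that every retry attempt fails independently
all_attempts_fail = failure_rate ** max_attempts

print(f"estimated per-attempt failure rate: {failure_rate:.3f}")            # 0.045
print(f"all {max_attempts} attempts fail: {all_attempts_fail * 100_000:.1f} / 100'000")  # 9.4
```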