Skip to content

Commit

Permalink
Merge pull request #73 from ServiceNow/set_node_by_name
Browse files Browse the repository at this point in the history
Set node by name
  • Loading branch information
ollmer authored Oct 31, 2024
2 parents 25b5095 + 23224a8 commit f884c54
Show file tree
Hide file tree
Showing 76 changed files with 1,675 additions and 999 deletions.
6 changes: 3 additions & 3 deletions assets/failed_tape.json
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@
},
{
"kind": "set_next_node",
"next_node": 1,
"next_node": "act",
"metadata": {
"agent": "Agent",
"node": "",
Expand Down Expand Up @@ -85,7 +85,7 @@
},
{
"kind": "set_next_node",
"next_node": 1,
"next_node": "act",
"metadata": {
"agent": "Agent",
"node": "",
Expand Down Expand Up @@ -122,7 +122,7 @@
},
{
"kind": "set_next_node",
"next_node": 1,
"next_node": "act",
"metadata": {
"agent": "Agent",
"node": "",
Expand Down
36 changes: 23 additions & 13 deletions examples/gaia_agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,24 +123,34 @@ def prepare_guidance(cls, planning_mode: PlanningMode, subtasks: bool) -> list[G
GaiaNode(name="plan", guidance=PromptRegistry.plan),
GaiaNode(name="facts_survey", guidance=PromptRegistry.facts_survey),
GaiaNode(name="start_execution", guidance=PromptRegistry.start_execution),
GaiaNode(name="default", next_node=3),
GaiaNode(name="default", next_node="default"),
]
elif planning_mode == PlanningMode.reflect:
guidance_nodes = [
GaiaNode(name="plan", guidance=PromptRegistry.plan),
GaiaNode(name="facts_survey", guidance=PromptRegistry.facts_survey),
GaiaNode(name="start_execution", guidance=PromptRegistry.start_execution, next_node=6),
GaiaNode(name="think_after_observation", guidance=PromptRegistry.think_after_observation, next_node=6),
GaiaNode(name="think_after_calculation", guidance=PromptRegistry.think_after_calculation, next_node=6),
GaiaNode(name="default", next_node=6),
GaiaNode(
name="start_execution", guidance=PromptRegistry.start_execution, next_node="after_observation"
),
GaiaNode(
name="think_after_observation",
guidance=PromptRegistry.think_after_observation,
next_node="after_observation",
),
GaiaNode(
name="think_after_calculation",
guidance=PromptRegistry.think_after_calculation,
next_node="after_observation",
),
GaiaNode(name="default", next_node="after_observation"),
ObservationControlNode(
name="node_after_observation",
name="after_observation",
observation_to_node={
PageObservation: 3,
SearchResultsObservation: 3,
CalculationResultObservation: 4,
PageObservation: "think_after_observation",
SearchResultsObservation: "think_after_observation",
CalculationResultObservation: "think_after_calculation",
},
default_node=5,
default_node="default",
),
]
elif planning_mode == PlanningMode.facts_and_sources:
Expand All @@ -149,23 +159,23 @@ def prepare_guidance(cls, planning_mode: PlanningMode, subtasks: bool) -> list[G
GaiaNode(name="facts_survey", guidance=PromptRegistry.facts_survey),
GaiaNode(name="sources_plan", guidance=PromptRegistry.sources_plan),
GaiaNode(name="start_execution", guidance=PromptRegistry.start_execution),
GaiaNode(name="default", next_node=4),
GaiaNode(name="default", next_node="default"),
]
elif planning_mode == PlanningMode.multiplan:
guidance_nodes = [
GaiaNode(name="plan", guidance=PromptRegistry.plan3),
GaiaNode(name="facts_survey", guidance=PromptRegistry.facts_survey),
GaiaNode(name="sources_plan", guidance=PromptRegistry.sources_plan),
GaiaNode(name="start_execution", guidance=PromptRegistry.start_execution),
GaiaNode(name="default", next_node=4),
GaiaNode(name="default", next_node="default"),
]
elif planning_mode == PlanningMode.replan_after_sources:
guidance_nodes = [
GaiaNode(name="plan", guidance=PromptRegistry.plan),
GaiaNode(name="facts_survey", guidance=PromptRegistry.facts_survey),
GaiaNode(name="better_plan", guidance=PromptRegistry.better_plan),
GaiaNode(name="start_execution", guidance=PromptRegistry.start_execution),
GaiaNode(name="default", next_node=4),
GaiaNode(name="default", next_node="default"),
]
else:
raise ValueError(f"Unknown planning mode: {planning_mode}")
Expand Down
2 changes: 1 addition & 1 deletion examples/gsm8k_tuning/math_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ def create(cls, llm: LLM):
steps_prompt=ALLOWED_STEPS,
agent_step_cls=MathAgentStep,
guidance=HINTS,
next_node=-1,
next_node="default",
),
],
max_iterations=2,
Expand Down
78 changes: 49 additions & 29 deletions examples/intro_clean.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -99,13 +99,15 @@
"\n",
"\n",
"class MainNode(Node):\n",
" name: str = \"main\"\n",
"\n",
" def make_prompt(self, agent: Agent, tape: DialogTape) -> Prompt:\n",
" # Render the whole tape into the prompt, each step is converted to message\n",
" return Prompt(messages=tape_to_messages(tape))\n",
"\n",
" def generate_steps(self, agent: Agent, tape: DialogTape, llm_stream: LLMStream):\n",
" yield AssistantStep(content=llm_stream.get_text()) # Generate new step from the LLM output stream.\n",
" yield SetNextNode(next_node=0) # Which node to execute next, more on that later\n",
" yield SetNextNode(next_node=\"main\") # Which node to execute next, more on that later\n",
"\n",
"\n",
"agent = Agent[DialogTape].create(llm, nodes=[MainNode()])\n",
Expand Down Expand Up @@ -285,12 +287,14 @@
"\n",
"\n",
"class MainNode(Node):\n",
" name: str = \"main\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" return Prompt(messages=tape_to_messages(tape))\n",
"\n",
" def generate_steps(self, agent, tape, llm_stream: LLMStream):\n",
" yield AssistantStep(content=llm_stream.get_text())\n",
" yield SetNextNode(next_node=0) # Continue to the same first node\n",
" yield SetNextNode(next_node=\"main\") # Continue to the same node\n",
"\n",
"\n",
"node = MainNode()\n",
Expand Down Expand Up @@ -345,36 +349,38 @@
"from tapeagents.view import TapeViewStack\n",
"from tapeagents.core import StepMetadata\n",
"\n",
"# The \"top\" view in the tape view stack is the view of the current agent. Initially `top.next_node` is 0\".\n",
"# The \"top\" view in the tape view stack is the view of the current agent.\n",
"# Initially `top.last_node` is empty and the agent will run the first node from its list.\n",
"tape1 = DialogTape(steps=[UserStep(content=\"Hi, AI!\")])\n",
"next_node1 = TapeViewStack.compute(tape1).top.next_node\n",
"print(next_node1)\n",
"assert next_node1 == 0\n",
"last_node = TapeViewStack.compute(tape1).top.last_node\n",
"print(f\"1: {last_node}\")\n",
"assert last_node == \"\"\n",
"\n",
"\n",
"# When the agent computes the view, it bumps up `top.next_node` every time it encounters a step with a new `prompt_id``.\n",
"# The new prompt_id on the tape signals to the agent the current node has run.\n",
"# When the agent computes the view, it updates `top.last_node` with the node from the latest agent step\n",
"# The agent will search the next node after the last_node in its nodes list.\n",
"tape2 = DialogTape(\n",
" steps=[\n",
" UserStep(content=\"Hi, AI!\"),\n",
" AssistantStep(metadata=StepMetadata(prompt_id=\"123\"), content=\"AI here, how I can help?\"),\n",
" AssistantStep(metadata=StepMetadata(prompt_id=\"123\", node=\"main\"), content=\"AI here, how I can help?\"),\n",
" ]\n",
")\n",
"next_node2 = TapeViewStack.compute(tape2).top.next_node\n",
"print(next_node2)\n",
"assert next_node2 == 1\n",
"last_node = TapeViewStack.compute(tape2).top.last_node\n",
"print(f\"2: {last_node}\")\n",
"assert last_node == \"main\"\n",
"\n",
"# The SetNextNode step on the tape changes `top.next_node` to the value of the `next_node` field in the SetNextNode step.\n",
"# The agent will use this value\n",
"tape3 = DialogTape(\n",
" steps=[\n",
" UserStep(content=\"Hi, AI!\"),\n",
" AssistantStep(metadata=StepMetadata(prompt_id=\"123\"), content=\"AI here, how I can help?\"),\n",
" SetNextNode(next_node=0),\n",
" SetNextNode(next_node=\"act\"),\n",
" ]\n",
")\n",
"next_node3 = TapeViewStack.compute(tape3).top.next_node\n",
"print(next_node3)\n",
"assert next_node3 == 0\n"
"next_node = TapeViewStack.compute(tape3).top.next_node\n",
"print(f\"3: {next_node}\")\n",
"assert next_node == \"act\"\n"
]
},
{
Expand All @@ -386,7 +392,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
Expand Down Expand Up @@ -443,7 +449,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that the agent is able to continue talking to you thanks for `SetNextNode(next_node=0)` step that `generate_steps` produced. If you try to remove this step as an exercise, the agent will crash because there is only one node."
"Note that the agent is able to continue talking to you thanks to the `SetNextNode(next_node=\"main\")` step that `generate_steps` produced. If you try to remove this step as an exercise, the agent will crash because there is only one node."
]
},
{
Expand Down Expand Up @@ -506,6 +512,8 @@
"\n",
"\n",
"class PlanNode(Node):\n",
" name: str = \"plan\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" guidance = \"Write a natural language plan on how to use tools help the user. Output a list of numbered items, like 1., 2., 3., etc.\"\n",
" guidance_message = {\"role\": \"user\", \"content\": guidance}\n",
Expand All @@ -521,6 +529,8 @@
"\n",
"\n",
"class ActNode(Node):\n",
" name: str = \"act\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" guidance = \"Follow the plan you created to earlier. When you are done, respond to the user.\"\n",
" guidance_message = {\"role\": \"user\", \"content\": guidance}\n",
Expand All @@ -532,10 +542,10 @@
" o = llm_stream.get_output()\n",
" if o.content:\n",
" yield AssistantStep(content=o.content)\n",
" yield SetNextNode(next_node=0)\n",
" yield SetNextNode(next_node=\"plan\")\n",
" elif o.tool_calls:\n",
" yield ToolCalls.from_llm_output(o)\n",
" yield SetNextNode(next_node=1)\n",
" yield SetNextNode(next_node=\"act\")\n",
" else:\n",
" raise ValueError()\n",
"\n",
Expand Down Expand Up @@ -595,7 +605,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
Expand Down Expand Up @@ -654,6 +664,8 @@
"\n",
"\n",
"class PlanNode(Node):\n",
" name: str = \"plan\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" system_message = {\"role\": \"system\", \"content\": system_instruction}\n",
" guidance_message = {\"role\": \"user\", \"content\": agent.templates[\"planning\"]}\n",
Expand All @@ -672,6 +684,8 @@
"\n",
"\n",
"class ActNode(Node):\n",
" name: str = \"act\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" system_message = {\"role\": \"system\", \"content\": system_instruction}\n",
" guidance_message = {\"role\": \"user\", \"content\": agent.templates[\"call_or_respond\"]}\n",
Expand Down Expand Up @@ -707,10 +721,10 @@
" yield AssistantStep(content=\"Invalid LLM output: response and tool_call cannot be in the same message\")\n",
" if response:\n",
" yield AssistantStep(content=response)\n",
" yield SetNextNode(next_node=0)\n",
" yield SetNextNode(next_node=\"plan\")\n",
" if tool_calls:\n",
" yield ToolCalls(tool_calls=tool_calls)\n",
" yield SetNextNode(next_node=1)\n",
" yield SetNextNode(next_node=\"act\")\n",
" except Exception as e:\n",
" yield AssistantStep(content=\"Invalid JSON object: \" + str(e))\n",
"\n",
Expand Down Expand Up @@ -942,7 +956,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
Expand Down Expand Up @@ -1026,11 +1040,13 @@
"from tapeagents.core import Respond\n",
"from tapeagents.prompting import view_to_messages\n",
"\n",
"search_system_instruction = f\"\"\"Use at most 5 tool calls to search the request info on on the web.\"\"\"\n",
"search_system_instruction = \"Use at most 5 tool calls to search the request info on on the web.\"\n",
"search_system_message = {\"role\": \"system\", \"content\": search_system_instruction}\n",
"\n",
"\n",
"class SearchAgentMainNode(Node):\n",
" name: str = \"main\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" view = agent.compute_view(tape)\n",
" return Prompt(messages=view_to_messages(view.top, agent), tools=search_agent_env.get_tool_schema_dicts())\n",
Expand All @@ -1041,9 +1057,9 @@
" # if the LLM responds, yield Respond(..) as your last step\n",
" yield Respond(content=o.content)\n",
" elif o.tool_calls:\n",
" # while the LLM suggests tool calls, yield them as action steps\n",
" # when the LLM suggests tool calls, yield them as action steps\n",
" yield ToolCalls.from_llm_output(o)\n",
" yield SetNextNode(next_node=0)\n",
" yield SetNextNode(next_node=\"main\")\n",
" else:\n",
" raise ValueError()\n",
"\n",
Expand Down Expand Up @@ -1097,6 +1113,8 @@
"\n",
"\n",
"class PlanNode(Node):\n",
" name: str = \"plan\"\n",
"\n",
" def make_prompt(self, agent, tape) -> Prompt:\n",
" view = agent.compute_view(tape)\n",
" guidance = \"Write a natural language plan on how to use tools help the user. Output a list of numbered items, like 1., 2., 3., etc.\"\n",
Expand All @@ -1114,6 +1132,8 @@
"\n",
"\n",
"class ActNode(Node):\n",
" name: str = \"act\"\n",
"\n",
" def make_prompt(self, agent, tape: DialogTape) -> Prompt:\n",
" view = agent.compute_view(tape)\n",
" guidance = \"Follow the plan you created to earlier. When you are done, respond to the user.\"\n",
Expand All @@ -1126,10 +1146,10 @@
" def generate_steps(self, agent, dialog, llm_stream: LLMStream):\n",
" o = llm_stream.get_output()\n",
" if o.content:\n",
" yield SetNextNode(next_node=0)\n",
" yield SetNextNode(next_node=\"plan\")\n",
" yield AssistantStep(content=o.content)\n",
" elif o.tool_calls:\n",
" yield SetNextNode(next_node=1)\n",
" yield SetNextNode(next_node=\"act\")\n",
" # only keep the tool calls before the call to another agent\n",
" agent_call = None\n",
" for i, tc in enumerate(o.tool_calls):\n",
Expand Down
Loading

0 comments on commit f884c54

Please sign in to comment.