forked from openai/openai-agents-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathparallelization.py
More file actions
65 lines (51 loc) · 1.66 KB
/
parallelization.py
File metadata and controls
65 lines (51 loc) · 1.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import asyncio
from agents import Agent, ItemHelpers, Runner, trace
from examples.auto_mode import input_with_fallback
"""
This example shows the parallelization pattern. We run the agent three times in parallel, and pick
the best result.
"""
spanish_agent = Agent(
name="spanish_agent",
instructions="You translate the user's message to Spanish",
)
translation_picker = Agent(
name="translation_picker",
instructions="You pick the best Spanish translation from the given options.",
)
async def main():
    """Translate a user message to Spanish three times in parallel, then pick the best result.

    Fan-out: three independent runs of ``spanish_agent`` on the same input.
    Fan-in: ``translation_picker`` judges the candidates and chooses a winner.
    """
    msg = input_with_fallback(
        "Hi! Enter a message, and we'll translate it to Spanish.\n\n",
        "Good morning!",
    )

    # Ensure the entire workflow is a single trace
    with trace("Parallel translation"):
        # Fan out: launch three identical translation runs concurrently.
        results = await asyncio.gather(
            *(Runner.run(spanish_agent, msg) for _ in range(3))
        )

        candidates = [
            ItemHelpers.text_message_outputs(result.new_items) for result in results
        ]
        translations = "\n\n".join(candidates)
        print(f"\n\nTranslations:\n\n{translations}")

        # Fan in: a second agent judges the candidates and picks the winner.
        best_translation = await Runner.run(
            translation_picker,
            f"Input: {msg}\n\nTranslations:\n{translations}",
        )

        print("\n\n-----")
        print(f"Best translation: {best_translation.final_output}")
if __name__ == "__main__":
asyncio.run(main())