Fix babyagi.py and meta_prompting.py example scripts
Fixes the example scripts.

- Adds calls to the *_fmt functions in `babyagi.py`, so that the script
  runs without raising an exception.
- Removes one stop criterion and increases `max_tokens` in a few calls,
  so that the models' answers are not truncated.
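For context, a minimal sketch of what such a formatter plausibly does (the actual create_tasks_fmt in the repo may parse differently; the bullet-stripping rule below is an assumption): it turns the raw completion string into the list of task names that the downstream loop expects.

import re

# Hypothetical sketch; the real create_tasks_fmt in examples/babyagi.py
# may differ. It converts a raw completion into a list of task names.
def create_tasks_fmt(completion: str) -> list[str]:
    tasks = []
    for line in completion.splitlines():
        # Drop a leading "1.", "2)", "-" or "*" bullet, if present.
        task = re.sub(r"^\s*(?:[-*]|\d+[.)])\s*", "", line).strip()
        if task:
            tasks.append(task)
    return tasks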
HerrIvan authored Nov 12, 2023
1 parent a64aa76 commit 5f82da4
Showing 2 changed files with 13 additions and 7 deletions.
4 changes: 4 additions & 0 deletions examples/babyagi.py
@@ -134,6 +134,8 @@ def one_cycle(objective: str, task_list, next_task_id: int):
     )
     new_tasks = model(prompt)
 
+    new_tasks = create_tasks_fmt(new_tasks)
+
     for task in new_tasks:
         next_task_id += 1
         task_list.append({"task_id": next_task_id, "task_name": task})
@@ -143,6 +145,8 @@ def one_cycle(objective: str, task_list, next_task_id: int):
     )
     prioritized_tasks = model(prompt)
 
+    prioritized_tasks = prioritize_tasks_fmt(prioritized_tasks)
+
     return task, result, prioritized_tasks, next_task_id
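The commit message does not name the exception, but a plausible reading is that model(prompt) returns one raw string, so the loop over new_tasks would walk it character by character instead of over task names; routing the output through the formatter restores a proper list. A minimal illustration under that assumption, reusing the sketch above:

# Illustration of the assumed failure mode; values are made up.
raw_completion = "1. Survey the literature\n2. Draft an outline"

# Unparsed, iteration walks the string one character at a time.
assert list(raw_completion)[:3] == ["1", ".", " "]

# Parsed (with the create_tasks_fmt sketch above), iteration yields
# whole task names, which is what one_cycle's loop expects.
tasks = create_tasks_fmt(raw_completion)
assert tasks == ["Survey the literature", "Draft an outline"]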
16 changes: 9 additions & 7 deletions examples/meta_prompting.py
@@ -22,7 +22,7 @@ def solve(question):
         Let's solve this problem by splitting it into steps.
         """
 
-    complete = models.text_completion.openai(model_name)
+    complete = models.text_completion.openai(model_name, max_tokens=500)
 
     prompt = solve(question)
     answer = complete(prompt)
@@ -43,12 +43,12 @@ def determine_goal(question):
     def solve(memory):
         """{{memory}}. Let's begin."""
 
-    complete = models.text_completion.openai(model_name)
+    complete = models.text_completion.openai(model_name, max_tokens=500)
 
     prompt = determine_goal(question)
     answer = complete(prompt, stop_at=["."])
     prompt = solve(prompt + answer)
-    answer = complete(prompt, stop_at=["."])
+    answer = complete(prompt)
     completed = prompt + answer
 
     return completed
@@ -76,14 +76,14 @@ def find_expert(question):
     @text.prompt
     def get_answer(question, expert, memory):
         """
-        {{memory}}
+        {{memory}}".
         I am ready to ask my question.
         "{{expert}}" I say,
         {{question}}
         """
 
     complete_expert = models.text_completion.openai(model_name)
-    complete_answer = models.text_completion.openai(model_name)
+    complete_answer = models.text_completion.openai(model_name, max_tokens=500)
 
     prompt = find_expert(question)
     expert = complete_expert(prompt, stop_at=['"'])
@@ -111,7 +111,7 @@ def get_answer(expert, memory):
         """
 
     model_expert = models.text_completion.openai(model_name)
-    model_answer = models.text_completion.openai(model_name)
+    model_answer = models.text_completion.openai(model_name, max_tokens=500)
 
     prompt = find_expert(question)
     expert = model_expert(prompt, stop_at=["\n", "."])
@@ -157,7 +157,9 @@ def run_example(model_fn, question, model_name):
     meaning_q = "What is the meaning of life?"
 
     run_example(split_into_steps, math_q, args.model)
-    run_example(split_into_steps, sat_q, args.model)
+    run_example(
+        split_into_steps, sat_q, args.model
+    )  # gpt>3.5 usually gets this one right
     run_example(fill_in_the_blanks, sat_q, args.model)
     run_example(ask_an_expert, alignment_q, args.model)
     run_example(ask_an_expert_simple, meaning_q, args.model)
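For reference, this is the pattern the meta_prompting.py changes rely on, with the call signatures exactly as they appear in the diff above (the import path, model name, and prompt strings below are illustrative assumptions):

import outlines.models as models

# max_tokens is fixed when the completer is built, so every call gets a
# budget large enough (500 here, per the diff) to avoid truncation.
complete = models.text_completion.openai("gpt-3.5-turbo", max_tokens=500)

# A stop criterion is kept where only a short fragment is wanted...
expert = complete('The best expert on this topic is "', stop_at=['"'])

# ...and dropped where the full, untruncated answer matters.
answer = complete("Let's solve this problem by splitting it into steps.")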
