
Commit

update documentation to use gpt-4o and gpt-4o-mini, and use correct outlines interfaces
lapp0 committed Sep 15, 2024
1 parent e2ec5d8 commit a2879e1
Showing 7 changed files with 17 additions and 16 deletions.

docs/reference/index.md (2 changes: 1 addition & 1 deletion)

@@ -10,6 +10,6 @@ By default, language models stop generating tokens after an <EOS> token was generated
```python
import outlines.models as models

-complete = models.openai("gpt-3.5-turbo")
+complete = models.openai("gpt-4o-mini")
expert = complete("Name an expert in quantum gravity.", stop_at=["\n", "."])
```
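
For comparison, the example files updated in this commit route the same kind of call through `outlines.generate.text` instead of calling the model object directly. A minimal sketch of that pattern, assembled only from interfaces that appear elsewhere in this diff (the prompt string is illustrative):

```python
import outlines
import outlines.models as models

model = models.openai("gpt-4o-mini")
complete = outlines.generate.text(model)

# Stop generation at a newline or a period, mirroring the stop_at usage above.
expert = complete("Name an expert in quantum gravity.", stop_at=["\n", "."])
```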

examples/babyagi.py (2 changes: 1 addition & 1 deletion)

@@ -10,7 +10,7 @@
import outlines
import outlines.models as models

-model = models.openai("gpt-3.5-turbo")
+model = models.openai("gpt-4o-mini")
complete = outlines.generate.text(model)



examples/math_generate_code.py (2 changes: 1 addition & 1 deletion)

@@ -35,7 +35,7 @@ def execute_code(code):


prompt = answer_with_code_prompt(question, examples)
-model = models.openai("gpt-3.5-turbo")
+model = models.openai("gpt-4o-mini")
answer = outlines.generate.text(model)(prompt)
result = execute_code(answer)
print(f"It takes Carla {result:.0f} minutes to download the file.")

examples/meta_prompting.py (2 changes: 1 addition & 1 deletion)

@@ -140,7 +140,7 @@ def run_example(model_fn, question, model_name):
parser.add_argument(
    "--model",
    type=str,
-    default="gpt-3.5-turbo-1106",
+    default="gpt-4o-mini",
    help="The Large Language Model to use to run the examples.",
)
args = parser.parse_args()

examples/pick_odd_one_out.py (2 changes: 1 addition & 1 deletion)

@@ -31,7 +31,7 @@ def build_ooo_prompt(options):

options = ["sea", "mountains", "plains", "sock"]

-model = models.openai("gpt-3.5-turbo")
+model = models.openai("gpt-4o-mini")
gen_text = outlines.generate.text(model)
gen_choice = outlines.generate.choice(model, options)


examples/react.py (21 changes: 11 additions & 10 deletions)

@@ -13,6 +13,7 @@
import requests # type: ignore

import outlines
+import outlines.generate as generate
import outlines.models as models


@@ -45,25 +46,25 @@ def search_wikipedia(query: str):


prompt = build_reAct_prompt("Where is Apple Computers headquarted? ")
-model = models.openai("gpt-3.5-turbo")
-complete = outlines.generate.text(model)
+model = models.openai("gpt-4o-mini")
+
+mode_generator = generate.choice(model, choices=["Tho", "Act"])
+action_generator = generate.choice(model, choices=["Search", "Finish"])
+text_generator = generate.text(model)

for i in range(1, 10):
-    mode = complete.generate_choice(prompt, choices=["Tho", "Act"], max_tokens=128)
+    mode = mode_generator(prompt, max_tokens=128)
    prompt = add_mode(i, mode, "", prompt)

    if mode == "Tho":
-        thought = complete(prompt, stop_at="\n", max_tokens=128)
+        thought = text_generator(prompt, stop_at="\n", max_tokens=128)
        prompt += f"{thought}"
    elif mode == "Act":
-        action = complete.generate_choice(
-            prompt, choices=["Search", "Finish"], max_tokens=128
-        )
+        action = action_generator(prompt, max_tokens=128)
        prompt += f"{action} '"

-        subject = complete(
-            prompt, stop_at=["'"], max_tokens=128
-        )  # Apple Computers headquartered
+        subject = text_generator(prompt, stop_at=["'"], max_tokens=128)
+        # Apple Computers headquartered
        subject = " ".join(subject.split()[:2])
        prompt += f"{subject}'"

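Taken together, the react.py changes replace the removed `complete.generate_choice(...)` calls with generators that are built once via `outlines.generate` and then called like plain functions. A condensed sketch of the new pattern, using only calls that appear in this diff (the prompt text is illustrative):

```python
import outlines.generate as generate
import outlines.models as models

model = models.openai("gpt-4o-mini")

# Build the generators once, up front.
mode_generator = generate.choice(model, choices=["Tho", "Act"])
action_generator = generate.choice(model, choices=["Search", "Finish"])
text_generator = generate.text(model)

# Then call them like functions, with per-call sampling options.
prompt = "An illustrative ReAct-style prompt."
mode = mode_generator(prompt, max_tokens=128)
thought = text_generator(prompt, stop_at="\n", max_tokens=128)
```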

examples/self_consistency.py (2 changes: 1 addition & 1 deletion)

@@ -55,7 +55,7 @@ def few_shots(question, examples):
"""


-model = models.openai("gpt-3.5-turbo")
+model = models.openai("gpt-4o-mini")
generator = outlines.generate.text(model)
prompt = few_shots(question, examples)
answers = generator(prompt, samples=10)
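The self_consistency.py example above draws ten samples from the same prompt (`samples=10`). Self-consistency then typically keeps the most common answer across the samples; a hedged sketch of that aggregation, assuming `answers` is a list of strings (the normalization below is illustrative, and the example's own parsing is not shown in this diff):

```python
from collections import Counter

# Assumes `answers` is the list of sampled completions returned above.
# Real self-consistency usually parses a final answer out of each
# completion before voting; here we vote on the stripped raw text.
votes = Counter(answer.strip() for answer in answers)
best_answer, count = votes.most_common(1)[0]
print(f"Most common answer ({count}/{len(answers)} samples): {best_answer}")
```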

