🔥 use env
bruceunx committed Apr 29, 2024
1 parent dc9ad7f commit 3478982
Showing 4 changed files with 17 additions and 43 deletions.
13 changes: 0 additions & 13 deletions gpt_server/_config.py
@@ -1,16 +1,3 @@
from pathlib import Path

Base = Path(__file__).resolve().parent.parent

APIs = {}

person_file = Base / '.person'

if person_file.is_file():
    with open(Base / ".person", "r") as f:
        for line in f.readlines():
            key, value = line.split("=")
            APIs[key] = value.strip()

if 'proxy' not in APIs:
    APIs['proxy'] = None  # type: ignore
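
With the .person loader deleted, configuration now has to come from the process environment. Across the four changed files the variables read are GEMINI_KEY, REDIS_HOST, REDIS_PORT, GROQ_KEY and OPENAI_KEY, plus an optional proxy. A minimal startup-check sketch (the check_env helper is hypothetical and not part of this commit; the routers themselves simply let os.environ[...] raise KeyError when a variable is missing):

import os

# Variables the routers read after this commit; 'proxy' stays optional.
REQUIRED = ("GEMINI_KEY", "REDIS_HOST", "REDIS_PORT", "GROQ_KEY", "OPENAI_KEY")


def check_env() -> None:
    """Hypothetical guard: one clear error instead of a bare KeyError at import time."""
    missing = [name for name in REQUIRED if name not in os.environ]
    if missing:
        raise RuntimeError(f"missing environment variables: {', '.join(missing)}")


check_env()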
19 changes: 10 additions & 9 deletions gpt_server/routers/gemini.py
@@ -2,27 +2,29 @@
from datetime import datetime, timedelta
import json
from functools import wraps
import os

from fastapi import APIRouter, Request
from pydantic_settings import BaseSettings
from sse_starlette import EventSourceResponse
import aiohttp
from redis.asyncio import Redis

from .._config import APIs

from gpt_server.schemas.prompt import Prompt

router = APIRouter()

API_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?alt=sse&key={APIs['gemini']}"
API_URL_PRO = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:streamGenerateContent?alt=sse&key={APIs['gemini']}"
api_key = os.environ['GEMINI_KEY']

API_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?alt=sse&key={api_key}"
API_URL_PRO = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:streamGenerateContent?alt=sse&key={api_key}"

PROXY = os.environ['proxy'] if 'proxy' in os.environ else None


class Settings(BaseSettings):
    redis_password: str = APIs["redis_password"]
    redis_host: str = APIs["redis"]
    redis_port: int = int(APIs["redis_port"])
    redis_host: str = os.environ['REDIS_HOST']
    redis_port: int = int(os.environ['REDIS_PORT'])
    rate_limit_per_minute: int = 2


@@ -33,7 +35,6 @@ class Settings(BaseSettings):
    port=settings.redis_port,
    db=0,
    decode_responses=True,
    password=settings.redis_password,
)


@@ -101,7 +102,7 @@ async def wrapper(request: Request, *args, **kwargs):

async def get_content(data, url):
    async with aiohttp.ClientSession() as sess:
        async with sess.post(url, json=data, proxy=APIs["proxy"]) as res:
        async with sess.post(url, json=data, proxy=PROXY) as res:
            async for chunk in res.content:
                _data = chunk.decode("utf-8")
                if _data.strip() != "":
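
A side note on the new Settings class (a sketch under the assumption of pydantic-settings v2, not something this commit does): BaseSettings already matches field names to environment variables case-insensitively, so REDIS_HOST and REDIS_PORT could be picked up by the settings class itself rather than through explicit os.environ lookups.

import os

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    # BaseSettings fills these from the environment; REDIS_HOST / REDIS_PORT
    # match the field names case-insensitively, so no os.environ[...] is needed.
    redis_host: str = "localhost"
    redis_port: int = 6379
    rate_limit_per_minute: int = 2


os.environ.setdefault("REDIS_HOST", "127.0.0.1")  # illustration only
os.environ.setdefault("REDIS_PORT", "6380")       # illustration only
settings = Settings()
print(settings.redis_host, settings.redis_port)   # values come from the environment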
16 changes: 5 additions & 11 deletions gpt_server/routers/groq.py
@@ -1,25 +1,26 @@
import asyncio
import os

from fastapi import APIRouter, Request
from sse_starlette import EventSourceResponse
import aiohttp

from gpt_server.schemas.prompt import Prompt
from .._config import APIs

router = APIRouter()

url = "https://api.groq.com/openai/v1/chat/completions"

headers = {
"Authorization": f"Bearer {APIs['groq']}",
"Authorization": f"Bearer {os.environ['GROQ_KEY']}",
"Content-Type": "application/json",
}

PROXY = os.environ['proxy'] if 'proxy' in os.environ else None


async def get_content(data):
    async with aiohttp.ClientSession(headers=headers) as sess:
        async with sess.post(url, json=data, proxy=APIs["proxy"]) as res:
        async with sess.post(url, json=data, proxy=PROXY) as res:
            async for chunk in res.content:
                if chunk:
                    yield chunk.decode("utf-8")[6:]
@@ -29,13 +30,6 @@ async def get_content(data):
break


async def generate_data():
    for i in range(10):
        await asyncio.sleep(.1)  # Simulate some delay
        yield '{"choices":[{"index":0,"delta":{"content":" handle"}}]}'
    yield "[DONE]"


@router.post("/chat", status_code=200)
async def chat(request: Request, prompt: Prompt):

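
In get_content the slice chunk.decode("utf-8")[6:] drops the leading "data: " prefix that Server-Sent Events place before every payload line. A standalone sketch of that parsing step, using a hard-coded chunk rather than a live Groq response:

# A raw SSE line as aiohttp would yield it from res.content (sample value only).
chunk = b'data: {"choices":[{"index":0,"delta":{"content":" handle"}}]}\n'

payload = chunk.decode("utf-8")[6:]  # strip the 6-character "data: " prefix
print(payload.strip())               # the bare JSON event body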
12 changes: 2 additions & 10 deletions gpt_server/routers/openai.py
@@ -1,18 +1,17 @@
import asyncio
import os

from fastapi import APIRouter, Request
from sse_starlette import EventSourceResponse
import aiohttp

from gpt_server.schemas.prompt import Prompt
from .._config import APIs

router = APIRouter()

url = "https://api.pumpkinaigc.online/v1/chat/completions"

headers = {
"Authorization": f"Bearer {APIs['openai']}",
"Authorization": f"Bearer {os.environ['OPENAI_KEY']}",
"Content-Type": "application/json",
}

@@ -29,13 +28,6 @@ async def get_content(data):
break


async def generate_data():
    for i in range(10):
        await asyncio.sleep(.1)  # Simulate some delay
        yield '{"choices":[{"index":0,"delta":{"content":" handle"}}]}'
    yield "[DONE]"


@router.post("/chat", status_code=200)
async def chat(request: Request, prompt: Prompt):

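
Both groq.py and openai.py drop the same generate_data stub, which faked a streaming completion for local testing. If that behaviour is still wanted, one option (a sketch, not part of this commit) is to keep it as a shared test helper:

import asyncio


async def generate_data():
    """Reproduces the deleted stub: a fake OpenAI-style SSE stream for tests."""
    for _ in range(10):
        await asyncio.sleep(.1)  # simulate network delay between chunks
        yield '{"choices":[{"index":0,"delta":{"content":" handle"}}]}'
    yield "[DONE]"


async def main():
    async for chunk in generate_data():
        print(chunk)


asyncio.run(main())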
