-
Notifications
You must be signed in to change notification settings - Fork 63
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: add llama hugging face apis #7
Changes from all commits
f65ee69
03959c0
ae2b46c
c9bd48a
70edf76
20e853d
2832d67
66ad748
3915292
768916a
8c3f877
b595f6f
802350d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,27 @@ | ||
# CI workflow: run the pytest suite on pushes and pull requests targeting main.
name: Tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      # Pin the interpreter and reuse the pip download cache between runs.
      - name: Set up Python 3.11
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"
          cache: "pip"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Test with pytest
        run: |
          python -m pytest tests/
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,7 @@ | ||
Genoss/* | ||
.DS_Store | ||
.gitignore | ||
**.pyc | ||
genoss/model/ggml-gpt4all-j-v1.3-groovy.bin | ||
llm/ggml-gpt4all-j-v1.3-groovy.bin | ||
|
||
local_models/* | ||
|
||
.env | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,7 @@ | ||
{ | ||
"[python]": { | ||
"editor.defaultFormatter": "ms-python.black-formatter" | ||
}, | ||
"python.formatting.provider": "none" | ||
} | ||
"[python]": { | ||
"editor.defaultFormatter": "ms-python.black-formatter" | ||
}, | ||
"python.formatting.provider": "black", | ||
"python.linting.enabled": true | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -72,7 +72,7 @@ cd ../../gpt4all-bindings/python | |
pip3 install -e . | ||
``` | ||
|
||
7. Download it to your local machine from [here](https://gpt4all.io/models/ggml-gpt4all-j-v1.3-groovy.bin) and put it in the `genoss/model` directory as `genoss/model/ggml-gpt4all-j-v1.3-groovy.bin` | ||
7. Download it to your local machine from [here](https://gpt4all.io/models/ggml-gpt4all-j-v1.3-groovy.bin) and put it in the `local_models` directory as `local_models/ggml-gpt4all-j-v1.3-groovy.bin` | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I agree.
||
|
||
</details> | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,12 @@ | ||
from fastapi import APIRouter

from logger import get_logger

logger = get_logger(__name__)

# Router for miscellaneous, non-domain endpoints (currently just the root probe).
misc_router = APIRouter()


@misc_router.get("/", tags=["Root"])
async def get_root():
    """Root health-check endpoint: confirms the Genoss API is reachable."""
    return "Genoss API is running!"
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,20 @@ | ||
from typing import Optional

from fastapi import Header, HTTPException


class AuthHandler:
    """Extracts an API key from an optional HTTP Bearer ``Authorization`` header."""

    @staticmethod
    async def check_auth_header(
        authorization: Optional[str] = Header(None),
    ):
        """Return the bearer token carried by *authorization*, or ``None``.

        A missing header is allowed (anonymous access) and yields ``None``.
        A header that is present but not exactly ``Bearer <token>`` raises
        ``HTTPException`` with status 403.
        """
        # No header at all: treat the request as unauthenticated rather than invalid.
        if authorization is None:
            return None

        parts = authorization.split()
        # Must be exactly two whitespace-separated pieces, scheme "bearer" (any case).
        if len(parts) != 2 or parts[0].lower() != "bearer":
            raise HTTPException(status_code=403, detail="Invalid authorization header")

        return parts[1]
This file was deleted.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,57 @@ | ||
import time
import uuid
from typing import Any, Dict

from genoss.entities.chat.message import Message


class ChatCompletion:
    """OpenAI-style chat-completion response envelope.

    Wraps a model answer in the ``id``/``object``/``created``/``usage``/
    ``choices`` structure that OpenAI-compatible clients expect.
    """

    class Choice:
        """A single candidate answer inside a completion."""

        def __init__(
            self, message: Message, finish_reason: str = "stop", index: int = 0
        ):
            self.message = message
            self.finish_reason = finish_reason
            self.index = index

        def to_dict(self) -> Dict[str, Any]:
            """Serialize this choice to a plain dict."""
            payload = {
                "message": self.message.to_dict(),
                "finish_reason": self.finish_reason,
                "index": self.index,
            }
            return payload

    class Usage:
        """Token accounting attached to a completion."""

        def __init__(
            self, prompt_tokens: int, completion_tokens: int, total_tokens: int
        ):
            self.prompt_tokens = prompt_tokens
            self.completion_tokens = completion_tokens
            self.total_tokens = total_tokens

        def to_dict(self) -> Dict[str, Any]:
            """Serialize the usage counters to a plain dict."""
            payload = {
                "prompt_tokens": self.prompt_tokens,
                "completion_tokens": self.completion_tokens,
                "total_tokens": self.total_tokens,
            }
            return payload

    def __init__(self, model: str, question: str, answer: str):
        self.id = str(uuid.uuid4())
        self.object = "chat.completion"
        self.created = int(time.time())
        self.model = model
        # NOTE(review): "tokens" are counted as characters here, not model
        # tokens — an approximation; confirm whether real tokenization is needed.
        self.usage = self.Usage(len(question), len(answer), len(question) + len(answer))
        self.choices = [
            self.Choice(Message(role="assistant", content=answer), "stop", 0)
        ]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the whole completion, choices and usage included."""
        payload = {
            "id": self.id,
            "object": self.object,
            "created": self.created,
            "model": self.model,
            "usage": self.usage.to_dict(),
            "choices": [choice.to_dict() for choice in self.choices],
        }
        return payload
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,35 +1,37 @@ | ||
from __future__ import annotations | ||
|
||
from typing import Dict | ||
|
||
from langchain import PromptTemplate, LLMChain | ||
from langchain.llms import FakeListLLM | ||
from langchain import LLMChain | ||
from langchain.embeddings import FakeEmbeddings | ||
from genoss.model.base_genoss_llm import BaseGenossLLM | ||
from genoss.chat.chat_completion import ChatCompletion | ||
from langchain.llms import FakeListLLM | ||
|
||
from genoss.entities.chat.chat_completion import ChatCompletion | ||
from genoss.llm.base_genoss import BaseGenossLLM | ||
from genoss.prompts.prompt_template import prompt_template | ||
|
||
FAKE_LLM_NAME = "fake" | ||
|
||
|
||
class FakeLLM(BaseGenossLLM): | ||
name: str = FAKE_LLM_NAME | ||
description: str = "Fake LLM for testing purpose" | ||
model_path: str = "" | ||
|
||
def generate_answer(self, messages: list) -> Dict: | ||
def generate_answer(self, question: str) -> Dict: | ||
print("Generating Answer") | ||
print(messages) | ||
last_messages = messages | ||
|
||
llm = FakeListLLM(responses=["Hello from FakeLLM!"]) | ||
prompt_template = "Question from user: {question}?, Answer from bot:" | ||
llm_chain = LLMChain( | ||
llm=llm, prompt=PromptTemplate.from_template(prompt_template) | ||
) | ||
response_text = llm_chain(last_messages) | ||
|
||
llm_chain = LLMChain(llm=llm, prompt=prompt_template) | ||
response_text = llm_chain(question) | ||
|
||
print("###################") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. logger instead of that |
||
print(response_text) | ||
|
||
answer = response_text["text"] | ||
chat_completion = ChatCompletion(model=self.name, answer=answer, last_messages=last_messages) | ||
chat_completion = ChatCompletion( | ||
model=self.name, answer=answer, question=question | ||
) | ||
|
||
return chat_completion.to_dict() | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
thanks for that !