Skip to content
This repository has been archived by the owner on Apr 18, 2024. It is now read-only.

Commit

Permalink
pylint fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
patcher9 committed Mar 14, 2024
1 parent 9a90b92 commit 10d6a80
Show file tree
Hide file tree
Showing 4 changed files with 20 additions and 27 deletions.
5 changes: 2 additions & 3 deletions src/dokumetry/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,14 @@
__init__ module for dokumetry package.
"""
from anthropic import AsyncAnthropic, Anthropic

from openai import AsyncOpenAI, OpenAI
from mistralai.async_client import MistralAsyncClient
from mistralai.client import MistralClient

from .openai import init as init_openai
from .async_openai import init as init_async_openai
from .anthropic import init as init_anthropic
from .async_anthropic import init as init_async_anthropic
from mistralai.async_client import MistralAsyncClient
from mistralai.client import MistralClient
from .cohere import init as init_cohere
from .mistral import init as init_mistral
from .async_mistral import init as init_async_mistral
Expand Down
20 changes: 8 additions & 12 deletions src/dokumetry/async_mistral.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ async def patched_chat(*args, **kwargs):
formatted_messages.append(f"{role}: {content_str}")
else:
formatted_messages.append(f"{role}: {content}")

prompt = " ".join(formatted_messages)
model = kwargs.get('model')

Expand All @@ -81,8 +81,7 @@ async def patched_chat(*args, **kwargs):
"response": response.choices[0].message.content
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response

Expand Down Expand Up @@ -127,7 +126,7 @@ async def stream_generator():
formatted_messages.append(f"{role}: {content_str}")
else:
formatted_messages.append(f"{role}: {content}")

prompt = " ".join(formatted_messages)

data = {
Expand All @@ -145,12 +144,11 @@ async def stream_generator():
"completionTokens": completion_tokens,
"totalTokens": total_tokens
}

print(data)
#send_data(data, doku_url, api_key)

send_data(data, doku_url, api_key)

return stream_generator()

async def patched_embeddings(*args, **kwargs):
"""
    Patched version of Mistral's embeddings generate method.
Expand Down Expand Up @@ -185,12 +183,10 @@ async def patched_embeddings(*args, **kwargs):
"totalTokens": response.usage.total_tokens,
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response



llm.chat = patched_chat
llm.chat_stream = patched_chat_stream
llm.embeddings = patched_embeddings
19 changes: 8 additions & 11 deletions src/dokumetry/mistral.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def patched_chat(*args, **kwargs):
formatted_messages.append(f"{role}: {content_str}")
else:
formatted_messages.append(f"{role}: {content}")

prompt = " ".join(formatted_messages)
model = kwargs.get('model')

Expand All @@ -81,11 +81,10 @@ def patched_chat(*args, **kwargs):
"response": response.choices[0].message.content
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response

#pylint: disable=too-many-locals
def patched_chat_stream(*args, **kwargs):
"""
Expand Down Expand Up @@ -127,7 +126,7 @@ def stream_generator():
formatted_messages.append(f"{role}: {content_str}")
else:
formatted_messages.append(f"{role}: {content}")

prompt = " ".join(formatted_messages)

data = {
Expand All @@ -146,11 +145,11 @@ def stream_generator():
"totalTokens": total_tokens
}

print(data)
#send_data(data, doku_url, api_key)

send_data(data, doku_url, api_key)

return stream_generator()

def patched_embeddings(*args, **kwargs):
"""
    Patched version of Mistral's embeddings generate method.
Expand Down Expand Up @@ -185,12 +184,10 @@ def patched_embeddings(*args, **kwargs):
"totalTokens": response.usage.total_tokens,
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response


llm.chat = patched_chat
llm.chat_stream = patched_chat_stream
llm.embeddings = patched_embeddings
3 changes: 2 additions & 1 deletion tests/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ openai>=1.1.1
anthropic>=0.19.0
pytest>=7.4.0
requests>=2.31.0
pylint>=3.0.2
pylint>=3.0.2
mistralai>=0.1.5

0 comments on commit 10d6a80

Please sign in to comment.