Skip to content

Commit

Permalink
Version 1.1.2
Browse files Browse the repository at this point in the history
  • Loading branch information
Brandon Lefore committed Feb 2, 2024
1 parent 24e9f35 commit 2155694
Show file tree
Hide file tree
Showing 100 changed files with 3,225 additions and 3,068 deletions.
2 changes: 1 addition & 1 deletion abacusai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@
from .streaming_client import StreamingClient


__version__ = "1.1.1"
__version__ = "1.1.2"
1 change: 1 addition & 0 deletions abacusai/api_class/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,7 @@ class FileFormat(ApiEnum):
NUMBERS = 'NUMBERS'
PPTX = 'PPTX'
PPT = 'PPT'
HTML = 'HTML'


class ExperimentationMode(ApiEnum):
Expand Down
4 changes: 4 additions & 0 deletions abacusai/api_class/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -442,10 +442,12 @@ class ChatLLMTrainingConfig(TrainingConfig):
include_general_knowledge (bool): Allow the LLM to rely not just on search results, but to fall back on general knowledge.
behavior_instructions (str): Customize the overall role instructions for the LLM.
response_instructions (str): Customize instructions for what the LLM responses should look like.
lookup_rewrite_instructions (str): Instructions for an LLM call to automatically generate filter expressions on document metadata to retrieve relevant documents for the conversation.
max_search_results (int): Maximum number of search results in the retrieval augmentation step. If we know that the questions are likely to have snippets which are easily matched in the documents, then a lower number will help with accuracy.
data_feature_group_ids (List[str]): List of feature group ids to use to possibly query for the chatllm.
data_prompt_context (str): Prompt context for the data feature group ids.
hide_generated_sql (bool): When running data queries, hides the generated SQL in the response and will just return the table.
disable_data_summarization (bool): After executing a query, reply back with only the table and the query run, without summarizing the response.
"""
document_retrievers: List[str] = None
num_completion_tokens: int = None
Expand All @@ -454,10 +456,12 @@ class ChatLLMTrainingConfig(TrainingConfig):
include_general_knowledge: bool = None
behavior_instructions: str = None
response_instructions: str = None
lookup_rewrite_instructions: str = None
max_search_results: int = None
data_feature_group_ids: List[str] = None
data_prompt_context: str = None
hide_generated_sql: bool = None
disable_data_summarization: bool = None

def __post_init__(self):
self.problem_type = enums.ProblemType.CHAT_LLM
Expand Down
14 changes: 9 additions & 5 deletions abacusai/api_endpoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,18 +11,22 @@ class ApiEndpoint(AbstractApiClass):
predictEndpoint (str): The URI that can be used to make predict calls against Deployments
proxyEndpoint (str): The URI that can be used to make proxy server calls
llmEndpoint (str): The URI that can be used to make llm api calls
externalChatEndpoint (str): The URI that can be used to access the external chat
dashboardEndpoint (str): The URI that the external chat will use to go back to the dashboard
"""

def __init__(self, client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None):
def __init__(self, client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None):
    """
    Store the service endpoint URIs returned by the API on this instance.

    Args:
        client: API client handed to the base class for subsequent calls.
        apiEndpoint (str): The URI that can be used to make API requests.
        predictEndpoint (str): The URI for predict calls against Deployments.
        proxyEndpoint (str): The URI for proxy server calls.
        llmEndpoint (str): The URI for LLM API calls.
        externalChatEndpoint (str): The URI used to access the external chat.
        dashboardEndpoint (str): The URI the external chat uses to go back to the dashboard.
    """
    super().__init__(client, None)
    # Copy each camelCase payload field onto its snake_case attribute.
    for snake_name, uri in (
            ('api_endpoint', apiEndpoint),
            ('predict_endpoint', predictEndpoint),
            ('proxy_endpoint', proxyEndpoint),
            ('llm_endpoint', llmEndpoint),
            ('external_chat_endpoint', externalChatEndpoint),
            ('dashboard_endpoint', dashboardEndpoint)):
        setattr(self, snake_name, uri)

def __repr__(self):
repr_dict = {f'api_endpoint': repr(self.api_endpoint), f'predict_endpoint': repr(
self.predict_endpoint), f'proxy_endpoint': repr(self.proxy_endpoint), f'llm_endpoint': repr(self.llm_endpoint)}
repr_dict = {f'api_endpoint': repr(self.api_endpoint), f'predict_endpoint': repr(self.predict_endpoint), f'proxy_endpoint': repr(self.proxy_endpoint), f'llm_endpoint': repr(
self.llm_endpoint), f'external_chat_endpoint': repr(self.external_chat_endpoint), f'dashboard_endpoint': repr(self.dashboard_endpoint)}
class_name = "ApiEndpoint"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None])
def to_dict(self):
    """
    Get a dict representation of the parameters in this class

    Returns:
        dict: The dict value representation of the class parameters.
            Keys whose values are None are omitted.
    """
    all_endpoints = {
        'api_endpoint': self.api_endpoint,
        'predict_endpoint': self.predict_endpoint,
        'proxy_endpoint': self.proxy_endpoint,
        'llm_endpoint': self.llm_endpoint,
        'external_chat_endpoint': self.external_chat_endpoint,
        'dashboard_endpoint': self.dashboard_endpoint,
    }
    # Drop unset endpoints so the result only carries populated URIs.
    return {name: uri for name, uri in all_endpoints.items() if uri is not None}
2 changes: 1 addition & 1 deletion abacusai/application_connector.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def to_dict(self):
'name': self.name, 'created_at': self.created_at, 'status': self.status, 'auth': self.auth}
return {key: value for key, value in resp.items() if value is not None}

def rename(self, name: str = None):
def rename(self, name: str):
"""
Renames an Application Connector
Expand Down
6 changes: 3 additions & 3 deletions abacusai/batch_prediction.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ def set_database_connector_output(self, database_connector_id: str = None, datab
"""
return self.client.set_batch_prediction_database_connector_output(self.batch_prediction_id, database_connector_id, database_output_config)

def set_feature_group_output(self, table_name: str = None):
def set_feature_group_output(self, table_name: str):
"""
Creates a feature group and sets it as the batch prediction output.
Expand All @@ -207,7 +207,7 @@ def set_output_to_console(self):
"""
return self.client.set_batch_prediction_output_to_console(self.batch_prediction_id)

def set_feature_group(self, feature_group_type: str = None, feature_group_id: str = None):
def set_feature_group(self, feature_group_type: str, feature_group_id: str = None):
"""
Sets the batch prediction input feature group.
Expand All @@ -220,7 +220,7 @@ def set_feature_group(self, feature_group_type: str = None, feature_group_id: st
"""
return self.client.set_batch_prediction_feature_group(self.batch_prediction_id, feature_group_type, feature_group_id)

def set_dataset_remap(self, dataset_id_remap: dict = None):
def set_dataset_remap(self, dataset_id_remap: dict):
"""
For the purpose of this batch prediction, will swap out datasets in the training feature groups
Expand Down
4 changes: 2 additions & 2 deletions abacusai/chat_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ def get(self):
"""
return self.client.get_chat_session(self.chat_session_id)

def delete_chat_message(self, message_index: int = None):
def delete_chat_message(self, message_index: int):
"""
Deletes a message in a chat session and its associated response.
Expand All @@ -84,7 +84,7 @@ def export(self):
"""
return self.client.export_chat_session(self.chat_session_id)

def rename(self, name: str = None):
def rename(self, name: str):
"""
Renames a chat session with Data Science Co-pilot.
Expand Down
Loading

0 comments on commit 2155694

Please sign in to comment.