diff --git a/abacusai/__init__.py b/abacusai/__init__.py
index 12323cf22..b8eec3094 100644
--- a/abacusai/__init__.py
+++ b/abacusai/__init__.py
@@ -4,4 +4,4 @@
from .streaming_client import StreamingClient
-__version__ = "1.2.1"
+__version__ = "1.2.2"
diff --git a/abacusai/agent.py b/abacusai/agent.py
index b3ab87179..a40e89d22 100644
--- a/abacusai/agent.py
+++ b/abacusai/agent.py
@@ -1,4 +1,5 @@
from .agent_version import AgentVersion
+from .api_class import WorkflowGraph
from .code_source import CodeSource
from .return_class import AbstractApiClass
@@ -24,7 +25,7 @@ class Agent(AbstractApiClass):
codeSource (CodeSource): If a python model, information on the source code
"""
- def __init__(self, client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={}):
+ def __init__(self, client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={}, workflowGraph={}):
super().__init__(client, agentId)
self.name = name
self.agent_id = agentId
@@ -40,11 +41,12 @@ def __init__(self, client, name=None, agentId=None, createdAt=None, projectId=No
self.code_source = client._build_class(CodeSource, codeSource)
self.latest_agent_version = client._build_class(
AgentVersion, latestAgentVersion)
+ self.workflow_graph = client._build_class(WorkflowGraph, workflowGraph)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'agent_id': repr(self.agent_id), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'notebook_id': repr(self.notebook_id), f'predict_function_name': repr(self.predict_function_name), f'source_code': repr(
- self.source_code), f'agent_config': repr(self.agent_config), f'memory': repr(self.memory), f'training_required': repr(self.training_required), f'agent_execution_config': repr(self.agent_execution_config), f'code_source': repr(self.code_source), f'latest_agent_version': repr(self.latest_agent_version)}
+ repr_dict = {f'name': repr(self.name), f'agent_id': repr(self.agent_id), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'notebook_id': repr(self.notebook_id), f'predict_function_name': repr(self.predict_function_name), f'source_code': repr(self.source_code), f'agent_config': repr(
+ self.agent_config), f'memory': repr(self.memory), f'training_required': repr(self.training_required), f'agent_execution_config': repr(self.agent_execution_config), f'code_source': repr(self.code_source), f'latest_agent_version': repr(self.latest_agent_version), f'workflow_graph': repr(self.workflow_graph)}
class_name = "Agent"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -57,8 +59,8 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name, 'agent_id': self.agent_id, 'created_at': self.created_at, 'project_id': self.project_id, 'notebook_id': self.notebook_id, 'predict_function_name': self.predict_function_name, 'source_code': self.source_code, 'agent_config': self.agent_config,
- 'memory': self.memory, 'training_required': self.training_required, 'agent_execution_config': self.agent_execution_config, 'code_source': self._get_attribute_as_dict(self.code_source), 'latest_agent_version': self._get_attribute_as_dict(self.latest_agent_version)}
+ resp = {'name': self.name, 'agent_id': self.agent_id, 'created_at': self.created_at, 'project_id': self.project_id, 'notebook_id': self.notebook_id, 'predict_function_name': self.predict_function_name, 'source_code': self.source_code, 'agent_config': self.agent_config, 'memory': self.memory,
+ 'training_required': self.training_required, 'agent_execution_config': self.agent_execution_config, 'code_source': self._get_attribute_as_dict(self.code_source), 'latest_agent_version': self._get_attribute_as_dict(self.latest_agent_version), 'workflow_graph': self._get_attribute_as_dict(self.workflow_graph)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
def refresh(self):
@@ -83,6 +85,19 @@ def describe(self):
"""
return self.client.describe_agent(self.agent_id)
+ def list_versions(self, limit: int = 100, start_after_version: str = None):
+ """
+ List all versions of an agent.
+
+ Args:
+ limit (int): If provided, limits the number of agent versions returned.
+ start_after_version (str): Unique string identifier of the version after which the list starts.
+
+ Returns:
+ list[AgentVersion]: An array of Agent versions.
+ """
+ return self.client.list_agent_versions(self.agent_id, limit, start_after_version)
+
def wait_for_publish(self, timeout=None):
"""
A waiting call until agent is published.
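The new `Agent.list_versions` helper is a thin wrapper over `client.list_agent_versions` (added later in this diff). A minimal usage sketch, assuming a valid API key; the agent ID is a placeholder:

```python
from abacusai import ApiClient

client = ApiClient(api_key='YOUR_API_KEY')  # assumed setup
agent = client.describe_agent('agent_id_placeholder')  # placeholder ID

# Page through the agent's versions directly from the Agent object.
for version in agent.list_versions(limit=10):
    print(version.agent_version, version.status)
```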
diff --git a/abacusai/agent_version.py b/abacusai/agent_version.py
index 744b502d6..cb7492f31 100644
--- a/abacusai/agent_version.py
+++ b/abacusai/agent_version.py
@@ -1,3 +1,4 @@
+from .api_class import WorkflowGraph
from .code_source import CodeSource
from .return_class import AbstractApiClass
@@ -21,7 +22,7 @@ class AgentVersion(AbstractApiClass):
codeSource (CodeSource): If a python model, information on where the source code is located.
"""
- def __init__(self, client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={}):
+ def __init__(self, client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={}, workflowGraph={}):
super().__init__(client, agentVersion)
self.agent_version = agentVersion
self.status = status
@@ -34,11 +35,12 @@ def __init__(self, client, agentVersion=None, status=None, agentId=None, agentCo
self.error = error
self.agent_execution_config = agentExecutionConfig
self.code_source = client._build_class(CodeSource, codeSource)
+ self.workflow_graph = client._build_class(WorkflowGraph, workflowGraph)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'agent_version': repr(self.agent_version), f'status': repr(self.status), f'agent_id': repr(self.agent_id), f'agent_config': repr(self.agent_config), f'publishing_started_at': repr(self.publishing_started_at), f'publishing_completed_at': repr(
- self.publishing_completed_at), f'pending_deployment_ids': repr(self.pending_deployment_ids), f'failed_deployment_ids': repr(self.failed_deployment_ids), f'error': repr(self.error), f'agent_execution_config': repr(self.agent_execution_config), f'code_source': repr(self.code_source)}
+ repr_dict = {f'agent_version': repr(self.agent_version), f'status': repr(self.status), f'agent_id': repr(self.agent_id), f'agent_config': repr(self.agent_config), f'publishing_started_at': repr(self.publishing_started_at), f'publishing_completed_at': repr(self.publishing_completed_at), f'pending_deployment_ids': repr(
+ self.pending_deployment_ids), f'failed_deployment_ids': repr(self.failed_deployment_ids), f'error': repr(self.error), f'agent_execution_config': repr(self.agent_execution_config), f'code_source': repr(self.code_source), f'workflow_graph': repr(self.workflow_graph)}
class_name = "AgentVersion"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -51,8 +53,8 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'agent_version': self.agent_version, 'status': self.status, 'agent_id': self.agent_id, 'agent_config': self.agent_config, 'publishing_started_at': self.publishing_started_at, 'publishing_completed_at': self.publishing_completed_at,
- 'pending_deployment_ids': self.pending_deployment_ids, 'failed_deployment_ids': self.failed_deployment_ids, 'error': self.error, 'agent_execution_config': self.agent_execution_config, 'code_source': self._get_attribute_as_dict(self.code_source)}
+ resp = {'agent_version': self.agent_version, 'status': self.status, 'agent_id': self.agent_id, 'agent_config': self.agent_config, 'publishing_started_at': self.publishing_started_at, 'publishing_completed_at': self.publishing_completed_at, 'pending_deployment_ids': self.pending_deployment_ids,
+ 'failed_deployment_ids': self.failed_deployment_ids, 'error': self.error, 'agent_execution_config': self.agent_execution_config, 'code_source': self._get_attribute_as_dict(self.code_source), 'workflow_graph': self._get_attribute_as_dict(self.workflow_graph)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
def refresh(self):
diff --git a/abacusai/api_class/ai_agents.py b/abacusai/api_class/ai_agents.py
index 0ba425b71..4956c950a 100644
--- a/abacusai/api_class/ai_agents.py
+++ b/abacusai/api_class/ai_agents.py
@@ -1,5 +1,5 @@
import dataclasses
-from typing import Union
+from typing import List, Union
from . import enums
from .abstract import ApiClass
@@ -20,3 +20,152 @@ class FieldDescriptor(ApiClass):
description: str = dataclasses.field(default=None)
example_extraction: Union[str, int, bool, float, list, dict] = dataclasses.field(default=None)
type: enums.FieldDescriptorType = dataclasses.field(default=enums.FieldDescriptorType.STRING)
+
+
+@dataclasses.dataclass
+class WorkflowNodeInputMapping(ApiClass):
+ """
+ A mapping of input to a workflow node.
+
+ Args:
+ name (str): The name of the input.
+        variable_type (WorkflowNodeInputType): The type of the input.
+ workflow_variable_source (str): The workflow source stage of the input.
+ is_required (bool): Whether the input is required.
+ """
+ name: str
+ variable_type: enums.WorkflowNodeInputType
+ workflow_variable_source: str = dataclasses.field(default=None)
+ is_required: bool = dataclasses.field(default=True)
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'variable_type': self.variable_type,
+ 'workflow_variable_source': self.workflow_variable_source,
+ 'is_required': self.is_required
+ }
+
+
+@dataclasses.dataclass
+class WorkflowNodeOutputMapping(ApiClass):
+ """
+    A mapping of an output from a workflow node.
+
+ Args:
+ name (str): The name of the output.
+        variable_type (WorkflowNodeOutputType): The type of the output. Defaults to STRING.
+ """
+ name: str
+ variable_type: enums.WorkflowNodeOutputType = dataclasses.field(default=enums.WorkflowNodeOutputType.STRING)
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'variable_type': self.variable_type
+ }
+
+
+@dataclasses.dataclass
+class WorkflowGraphNode(ApiClass):
+ """
+ A node in an Agent workflow graph.
+
+ Args:
+        name (str): Display name of the workflow node.
+ input_mappings (List[WorkflowNodeInputMapping]): List of input mappings for the node.
+ output_mappings (List[WorkflowNodeOutputMapping]): List of output mappings for the node.
+ function (callable): The callable node function reference if available.
+ function_name (str): The name of the function if available.
+ source_code (str): The source code of the function if available.
+        input_schema (dict): The react-json-schema-form schema for the input form, if applicable.
+        output_schema (dict): The react-json-schema-form schema for the output, if applicable.
+ package_requirements (list): List of package requirements for the node.
+ """
+
+ def __init__(self, name: str, input_mappings: List[WorkflowNodeInputMapping], output_mappings: List[WorkflowNodeOutputMapping], function: callable = None, function_name: str = None, source_code: str = None, input_schema: dict = None, output_schema: dict = None, package_requirements: list = None):
+ if function:
+ import inspect
+ self.function_name = function.__name__
+ self.source_code = inspect.getsource(function)
+ elif function_name and source_code:
+ self.function_name = function_name
+ self.source_code = source_code
+ else:
+ raise ValueError('Either function or function_name and source_code must be provided.')
+
+ self.name = name
+ self.input_mappings = input_mappings
+ self.output_mappings = output_mappings
+ self.input_schema = input_schema if input_schema else {}
+ self.output_schema = output_schema if output_schema else {}
+ self.package_requirements = package_requirements if package_requirements else []
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'function_name': self.function_name,
+ 'source_code': self.source_code,
+ 'input_mappings': [mapping.to_dict() for mapping in self.input_mappings],
+ 'output_mappings': [mapping.to_dict() for mapping in self.output_mappings],
+ 'input_schema': self.input_schema,
+ 'output_schema': self.output_schema,
+ 'package_requirements': self.package_requirements
+ }
+
+ @classmethod
+ def from_dict(cls, node: dict):
+ return cls(
+ name=node['name'],
+ function_name=node['function_name'],
+ source_code=node['source_code'],
+ input_mappings=[WorkflowNodeInputMapping(**mapping) for mapping in node['input_mappings']],
+ output_mappings=[WorkflowNodeOutputMapping(**mapping) for mapping in node['output_mappings']],
+ input_schema=node.get('input_schema', {}),
+ output_schema=node.get('output_schema', {}),
+ package_requirements=node.get('package_requirements', [])
+ )
+
+
+@dataclasses.dataclass
+class WorkflowGraphEdge(ApiClass):
+ """
+ An edge in an Agent workflow graph.
+
+ Args:
+ source (str): The source node of the edge.
+ target (str): The target node of the edge.
+ details (dict): Additional details about the edge.
+ """
+ source: str
+ target: str
+ details: dict = dataclasses.field(default_factory=dict)
+
+ def to_nx_edge(self):
+ return [self.source, self.target, self.details]
+
+
+@dataclasses.dataclass
+class WorkflowGraph(ApiClass):
+ """
+ An Agent workflow graph.
+
+ Args:
+ nodes (List[WorkflowGraphNode]): A list of nodes in the workflow graph.
+        edges (List[WorkflowGraphEdge]): A list of edges in the workflow graph, where each edge connects a source node to a target node and can carry additional details.
+ """
+ nodes: List[WorkflowGraphNode] = dataclasses.field(default_factory=list)
+ edges: List[WorkflowGraphEdge] = dataclasses.field(default_factory=list)
+
+ def to_dict(self):
+ return {
+ 'nodes': [node.to_dict() for node in self.nodes],
+ 'edges': [edge.to_dict() for edge in self.edges]
+ }
+
+ @classmethod
+ def from_dict(cls, graph: dict):
+ return cls(
+ nodes=[WorkflowGraphNode.from_dict(node) for node in graph.get('nodes', [])],
+ edges=[WorkflowGraphEdge.from_dict(edge) for edge in graph.get('edges', [])]
+ )
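Taken together, the new dataclasses compose as follows. A sketch only, importing straight from the modules touched in this diff; run it from a file so `inspect.getsource` can read the node function's body:

```python
from abacusai.api_class.ai_agents import (
    WorkflowGraph, WorkflowGraphNode,
    WorkflowNodeInputMapping, WorkflowNodeOutputMapping,
)
from abacusai.api_class.enums import WorkflowNodeInputType, WorkflowNodeOutputType


def summarize(text):
    # Illustrative node function; WorkflowGraphNode captures its name
    # and source code via inspect.getsource.
    return {'summary': text[:100]}


node = WorkflowGraphNode(
    name='Summarizer',
    input_mappings=[WorkflowNodeInputMapping('text', WorkflowNodeInputType.USER_INPUT)],
    output_mappings=[WorkflowNodeOutputMapping('summary', WorkflowNodeOutputType.STRING)],
    function=summarize,
)

# Edges reference nodes by name; a single-node graph needs none.
graph = WorkflowGraph(nodes=[node], edges=[])
print(graph.to_dict())
```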
diff --git a/abacusai/api_class/batch_prediction.py b/abacusai/api_class/batch_prediction.py
index ca02caa1b..3e3ac0e9f 100644
--- a/abacusai/api_class/batch_prediction.py
+++ b/abacusai/api_class/batch_prediction.py
@@ -32,7 +32,7 @@ class ForecastingBatchPredictionArgs(BatchPredictionArgs):
forecasting_horizon (int): The number of timestamps to predict in the future. Range: [1, 1000].
item_attributes_to_include_in_the_result (list): List of columns to include in the prediction output.
explain_predictions (bool): If True, calculates explanations for the forecasted values along with predictions.
- automate_monitoring (bool): If True, creates a monitor to calculate the drift for the batch prediction.
+ automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
"""
for_eval: bool = dataclasses.field(default=None)
predictions_start_date: str = dataclasses.field(default=None)
@@ -100,7 +100,7 @@ class PredictiveModelingBatchPredictionArgs(BatchPredictionArgs):
explanation_filter_label (str): For classification problems specifies the label to which the explanation bounds are applied.
output_columns (list): A list of column names to include in the prediction result.
explain_predictions (bool): If True, calculates explanations for the predicted values along with predictions.
- automate_monitoring (bool): If True, creates a monitor to calculate the drift for the batch prediction.
+ automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
"""
for_eval: bool = dataclasses.field(default=None)
explainer_type: enums.ExplainerType = dataclasses.field(default=None)
@@ -194,7 +194,7 @@ class TrainablePlugAndPlayBatchPredictionArgs(BatchPredictionArgs):
Args:
for_eval (bool): If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
- automate_monitoring (bool): If True, creates a monitor to calculate the drift for the batch prediction.
+ automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
"""
for_eval: bool = dataclasses.field(default=None)
automate_monitoring: bool = dataclasses.field(default=None)
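The reworded `automate_monitoring` docstrings make the default explicit: leaving the field unset now means a drift monitor is created on each run. A sketch of opting out; the other argument values are illustrative:

```python
from abacusai.api_class.batch_prediction import ForecastingBatchPredictionArgs

# Leaving automate_monitoring unset (None) now means "enabled";
# pass False explicitly to skip creating the drift monitor.
args = ForecastingBatchPredictionArgs(
    for_eval=False,
    forecasting_horizon=30,
    automate_monitoring=False,
)
```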
diff --git a/abacusai/api_class/dataset_application_connector.py b/abacusai/api_class/dataset_application_connector.py
index da88e079e..d6553a33c 100644
--- a/abacusai/api_class/dataset_application_connector.py
+++ b/abacusai/api_class/dataset_application_connector.py
@@ -159,6 +159,16 @@ def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.ABACUSUSAGEMETRICS
+@dataclasses.dataclass
+class FreshserviceDatasetConfig(DatasetConfig):
+ """
+ Dataset config for Freshservice Application Connector
+ """
+
+ def __post_init__(self):
+ self.application_connector_type = enums.ApplicationConnectorType.FRESHSERVICE
+
+
@dataclasses.dataclass
class _DatasetConfigFactory(_ApiClassFactory):
config_abstract_class = DatasetConfig
@@ -172,4 +182,5 @@ class _DatasetConfigFactory(_ApiClassFactory):
enums.ApplicationConnectorType.SHAREPOINT: SharepointDatasetConfig,
enums.ApplicationConnectorType.ZENDESK: ZendeskDatasetConfig,
enums.ApplicationConnectorType.ABACUSUSAGEMETRICS: AbacusUsageMetricsDatasetConfig,
+ enums.ApplicationConnectorType.FRESHSERVICE: FreshserviceDatasetConfig,
}
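The new connector type slots into the existing factory, so a Freshservice dataset should be creatable like any other application-connector dataset. A sketch under that assumption, using the SDK's existing `create_dataset_from_application_connector` entry point (not shown in this diff); IDs are placeholders:

```python
from abacusai import ApiClient
from abacusai.api_class.dataset_application_connector import FreshserviceDatasetConfig

client = ApiClient(api_key='YOUR_API_KEY')  # assumed setup

# The config carries no extra options; __post_init__ stamps the
# FRESHSERVICE connector type onto it.
dataset = client.create_dataset_from_application_connector(
    table_name='freshservice_tickets',
    application_connector_id='connector_id_placeholder',
    dataset_config=FreshserviceDatasetConfig(),
)
```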
diff --git a/abacusai/api_class/enums.py b/abacusai/api_class/enums.py
index 919b63919..368067a67 100644
--- a/abacusai/api_class/enums.py
+++ b/abacusai/api_class/enums.py
@@ -361,6 +361,7 @@ class ApplicationConnectorType(ApiEnum):
TEAMS = 'TEAMS'
ABACUSUSAGEMETRICS = 'ABACUSUSAGEMETRICS'
MICROSOFTAUTH = 'MICROSOFTAUTH'
+ FRESHSERVICE = 'FRESHSERVICE'
class PythonFunctionArgumentType(ApiEnum):
@@ -419,6 +420,7 @@ class LLMName(ApiEnum):
PALM = 'PALM'
PALM_TEXT = 'PALM_TEXT'
GEMINI_PRO = 'GEMINI_PRO'
+ GEMINI_1_5_PRO = 'GEMINI_1_5_PRO'
MIXTRAL_CHAT = 'MIXTRAL_CHAT'
MISTRAL_MEDIUM = 'MISTRAL_MEDIUM'
ABACUS_SMAUG2 = 'ABACUS_SMAUG2'
@@ -484,6 +486,20 @@ class FieldDescriptorType(ApiEnum):
DATE = 'DATE'
+class WorkflowNodeInputType(ApiEnum):
+ USER_INPUT = 'USER_INPUT'
+ WORKFLOW_VARIABLE = 'WORKFLOW_VARIABLE'
+
+
+class WorkflowNodeOutputType(ApiEnum):
+ INTEGER = 'INTEGER'
+ STRING = 'STRING'
+ BOOLEAN = 'BOOLEAN'
+ FLOAT = 'FLOAT'
+ JSON = 'JSON'
+ LIST = 'LIST'
+
+
class OcrMode(ApiEnum):
DEFAULT = 'DEFAULT'
LAYOUT = 'LAYOUT'
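The new enum members are usable anywhere the corresponding enum is accepted; for example, `GEMINI_1_5_PRO` can be passed to `evaluate_prompt`, whose signature appears later in this diff. A sketch, assuming `LlmResponse` exposes the generated text as `content`:

```python
from abacusai import ApiClient
from abacusai.api_class.enums import LLMName

client = ApiClient(api_key='YOUR_API_KEY')  # assumed setup

# evaluate_prompt accepts Union[LLMName, str], so the new member
# (or its string value 'GEMINI_1_5_PRO') selects the model.
response = client.evaluate_prompt(
    prompt='Summarize the release notes in one sentence.',
    llm_name=LLMName.GEMINI_1_5_PRO,
)
print(response.content)  # assumed attribute on LlmResponse
```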
diff --git a/abacusai/api_class/model.py b/abacusai/api_class/model.py
index 48a3b242c..19ca2e7ef 100644
--- a/abacusai/api_class/model.py
+++ b/abacusai/api_class/model.py
@@ -390,7 +390,6 @@ class NamedEntityExtractionTrainingConfig(TrainingConfig):
test_row_indicator (str): Column indicating which rows to use for training (TRAIN) and testing (TEST).
active_labels_column (str): Entities that have been marked in a particular text
document_format (NLPDocumentFormat): Format of the input documents.
- include_longformer (bool): Whether to include the longformer model.
minimum_bounding_box_overlap_ratio (float): Tokens are considered to belong to annotation if the user bounding box is provided and ratio of (token_bounding_box ∩ annotation_bounding_box) / token_bounding_area is greater than the provided value.
save_predicted_pdf (bool): Whether to save predicted PDF documents
enhanced_ocr (bool): Enhanced text extraction from predicted digital documents
@@ -424,7 +423,6 @@ class NaturalLanguageSearchTrainingConfig(TrainingConfig):
larger_embeddings (bool): Use a higher dimension embedding model.
search_chunk_size (int): Chunk size for indexing the documents.
chunk_overlap_fraction (float): Overlap in chunks while indexing the documents.
- test_split (int): Percent of dataset to use for test data. We support using a range between 5 ( i.e. 5% ) to 20 ( i.e. 20% ) of your dataset.
index_fraction (float): Fraction of the chunk to use for indexing.
"""
abacus_internal_model: bool = dataclasses.field(default=None)
@@ -751,21 +749,9 @@ class CustomAlgorithmTrainingConfig(TrainingConfig):
Training config for the CUSTOM_ALGORITHM problem type
Args:
- train_function_name (str): The name of the train function.
- predict_many_function_name (str): The name of the predict many function.
- training_input_tables (List[str]): List of tables to use for training.
- predict_function_name (str): Optional name of the predict function if the predict many function is not given.
- train_module_name (str): The name of the train module - only relevant if model is being uploaded from a zip file or github repositoty.
- predict_module_name (str): The name of the predict module - only relevant if model is being uploaded from a zip file or github repositoty.
- test_split (int): Percent of dataset to use for test data. We support using a range between 6% to 20% of your dataset to use as test data.
+ timeout_minutes (int): Timeout for the model training in minutes.
"""
- train_function_name: str = dataclasses.field(default=None)
- predict_many_function_name: str = dataclasses.field(default=None)
- training_input_tables: List[str] = dataclasses.field(default=None)
- predict_function_name: str = dataclasses.field(default=None)
- train_module_name: str = dataclasses.field(default=None)
- predict_module_name: str = dataclasses.field(default=None)
- test_split: int = dataclasses.field(default=None)
+ timeout_minutes: int = dataclasses.field(default=None)
def __post_init__(self):
self.problem_type = enums.ProblemType.CUSTOM_ALGORITHM
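After this change the custom-algorithm training config carries a single knob. A minimal sketch:

```python
from abacusai.api_class.model import CustomAlgorithmTrainingConfig

# The train/predict function names, module names, input tables, and
# test_split that used to live here are gone; only the timeout remains.
config = CustomAlgorithmTrainingConfig(timeout_minutes=120)
print(config.problem_type)  # ProblemType.CUSTOM_ALGORITHM, set in __post_init__
```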
diff --git a/abacusai/api_class/monitor.py b/abacusai/api_class/monitor.py
index 197bffb94..2d75fa864 100644
--- a/abacusai/api_class/monitor.py
+++ b/abacusai/api_class/monitor.py
@@ -1,4 +1,5 @@
import dataclasses
+from typing import List
from .abstract import ApiClass
from .enums import StdDevThresholdType
@@ -90,3 +91,47 @@ def to_dict(self):
'lower_bound': StdDevThreshold.from_dict(self.lower_bound).to_dict() if self.lower_bound else None,
'upper_bound': StdDevThreshold.from_dict(self.upper_bound).to_dict() if self.upper_bound else None,
}
+
+
+@dataclasses.dataclass
+class RestrictFeatureMappings(ApiClass):
+ """
+ Restrict Feature Mappings for Monitor Filtering
+
+ Args:
+ feature_name (str): The name of the feature to restrict the monitor to.
+ restricted_feature_values (list): The values of the feature to restrict the monitor to.
+ """
+ feature_name: str = dataclasses.field(default=None)
+ restricted_feature_values: list = dataclasses.field(default_factory=list)
+
+ def to_dict(self):
+ return {
+ 'feature_name': self.feature_name,
+ 'restricted_feature_values': self.restricted_feature_values,
+ }
+
+
+@dataclasses.dataclass
+class MonitorFilteringConfig(ApiClass):
+ """
+ Monitor Filtering Configuration
+
+ Args:
+        start_time (str): The start time of the prediction time column.
+        end_time (str): The end time of the prediction time column.
+        restrict_feature_mappings (List[RestrictFeatureMappings]): The feature mappings to restrict the monitor to.
+        target_class (str): The target class to restrict the monitor to.
+ """
+ start_time: str = dataclasses.field(default=None)
+ end_time: str = dataclasses.field(default=None)
+ restrict_feature_mappings: List[RestrictFeatureMappings] = dataclasses.field(default=None)
+ target_class: str = dataclasses.field(default=None)
+
+ def to_dict(self):
+ return {
+ 'start_time': self.start_time,
+ 'end_time': self.end_time,
+ 'restrict_feature_mappings': [RestrictFeatureMappings.from_dict(item).to_dict() for item in self.restrict_feature_mappings] if self.restrict_feature_mappings else None,
+ 'target_class': self.target_class,
+ }
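A sketch of how the two new monitor classes nest; the values are illustrative:

```python
from abacusai.api_class.monitor import MonitorFilteringConfig, RestrictFeatureMappings

config = MonitorFilteringConfig(
    start_time='2024-01-01',
    end_time='2024-03-01',
    restrict_feature_mappings=[
        RestrictFeatureMappings(
            feature_name='country',
            restricted_feature_values=['US', 'CA'],
        ),
    ],
    target_class='positive',
)
print(config.to_dict())
```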
diff --git a/abacusai/batch_prediction.py b/abacusai/batch_prediction.py
index 9446233cd..a6e5b25e5 100644
--- a/abacusai/batch_prediction.py
+++ b/abacusai/batch_prediction.py
@@ -81,7 +81,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
BatchPredictionArgs, globalPredictionArgs)
self.batch_prediction_args = client._build_class(
BatchPredictionArgs, batchPredictionArgs)
- self.deprecated_keys = {'explanations', 'global_prediction_args'}
+ self.deprecated_keys = {'global_prediction_args', 'explanations'}
def __repr__(self):
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'explanations': repr(self.explanations), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(
diff --git a/abacusai/batch_prediction_version.py b/abacusai/batch_prediction_version.py
index 281a6109e..24d60d51d 100644
--- a/abacusai/batch_prediction_version.py
+++ b/abacusai/batch_prediction_version.py
@@ -95,7 +95,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
BatchPredictionArgs, globalPredictionArgs)
self.batch_prediction_args = client._build_class(
BatchPredictionArgs, batchPredictionArgs)
- self.deprecated_keys = {'explanations', 'global_prediction_args'}
+ self.deprecated_keys = {'global_prediction_args', 'explanations'}
def __repr__(self):
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'explanations': repr(self.explanations), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(
diff --git a/abacusai/client.py b/abacusai/client.py
index a2fad2201..138ab96fc 100644
--- a/abacusai/client.py
+++ b/abacusai/client.py
@@ -38,7 +38,7 @@
DocumentRetrieverConfig, EvalArtifactType, FeatureGroupExportConfig,
ForecastingMonitorConfig, IncrementalDatabaseConnectorConfig, LLMName,
MergeConfig, ParsingConfig, PredictionArguments, ProblemType,
- PythonFunctionType, SamplingConfig, TrainingConfig
+ PythonFunctionType, SamplingConfig, TrainingConfig, WorkflowGraph
)
from .api_client_utils import (
INVALID_PANDAS_COLUMN_NAME_CHARACTERS, StreamingHandler, clean_column_name,
@@ -89,7 +89,6 @@
from .feature import Feature
from .feature_distribution import FeatureDistribution
from .feature_group import FeatureGroup
-from .feature_group_document import FeatureGroupDocument
from .feature_group_export import FeatureGroupExport
from .feature_group_export_config import FeatureGroupExportConfig
from .feature_group_export_download_url import FeatureGroupExportDownloadUrl
@@ -585,7 +584,7 @@ class BaseApiClient:
client_options (ClientOptions): Optional API client configurations
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
"""
- client_version = '1.2.1'
+ client_version = '1.2.2'
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False):
self.api_key = api_key
@@ -2195,16 +2194,6 @@ def get_data(self, feature_group_id: str, primary_key: str = None, num_rows: int
list[FeatureGroupRow]: A list of feature group rows."""
return self._proxy_request('getData', 'GET', query_params={'featureGroupId': feature_group_id, 'primaryKey': primary_key, 'numRows': num_rows}, parse_type=FeatureGroupRow, is_sync=True)
- def list_pending_feature_group_documents(self, feature_group_id: str) -> List[FeatureGroupDocument]:
- """Lists all pending documents added to feature group.
-
- Args:
- feature_group_id (str): The unique ID associated with the feature group.
-
- Returns:
- list[FeatureGroupDocument]: A list of pending feature group documents."""
- return self._call_api('listPendingFeatureGroupDocuments', 'GET', query_params={'featureGroupId': feature_group_id}, parse_type=FeatureGroupDocument)
-
def describe_python_function(self, name: str) -> PythonFunction:
"""Describe a Python Function.
@@ -2591,6 +2580,28 @@ def search_feature_groups(self, text: str, num_results: int = 10, project_id: st
list[OrganizationSearchResult]: A list of search results, each containing the retrieved object and its relevance score"""
return self._call_api('searchFeatureGroups', 'GET', query_params={'text': text, 'numResults': num_results, 'projectId': project_id, 'featureGroupIds': feature_group_ids}, parse_type=OrganizationSearchResult)
+ def list_agents(self, project_id: str) -> List[Agent]:
+ """Retrieves the list of agents in the specified project.
+
+ Args:
+ project_id (str): The unique identifier associated with the project.
+
+ Returns:
+ list[Agent]: A list of agents in the project."""
+ return self._call_api('listAgents', 'GET', query_params={'projectId': project_id}, parse_type=Agent)
+
+ def list_agent_versions(self, agent_id: str, limit: int = 100, start_after_version: str = None) -> List[AgentVersion]:
+ """List all versions of an agent.
+
+ Args:
+ agent_id (str): The unique identifier associated with the agent.
+ limit (int): If provided, limits the number of agent versions returned.
+ start_after_version (str): Unique string identifier of the version after which the list starts.
+
+ Returns:
+ list[AgentVersion]: An array of Agent versions."""
+ return self._call_api('listAgentVersions', 'GET', query_params={'agentId': agent_id, 'limit': limit, 'startAfterVersion': start_after_version}, parse_type=AgentVersion)
+
def list_document_retrievers(self, project_id: str, limit: int = 100, start_after_id: str = None) -> List[DocumentRetriever]:
"""List all the document retrievers.
@@ -3793,7 +3804,7 @@ def _cached_doc_retriever_deployment_info(document_retriever_id: str, ttl_hash:
return _cached_doc_retriever_deployment_info(document_retriever_id, ttl_hash=time.time() // ttl_seconds)
def get_matching_documents(self, document_retriever_id: str, query: str, filters: dict = None, limit: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None,
- max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None) -> List[DocumentRetrieverLookupResult]:
+ max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None) -> List[DocumentRetrieverLookupResult]:
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
Original documents are splitted into chunks and stored in the document retriever. This lookup function will return the relevant chunks
@@ -3812,13 +3823,14 @@ def get_matching_documents(self, document_retriever_id: str, query: str, filters
max_words_per_chunk (int): If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e, chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
score_multiplier_column (str): If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
min_score (float): If provided, will filter out the results with score lower than the value specified.
+            required_phrases (list): If provided, each result will contain at least one of the phrases in the given list. Matching is case- and whitespace-insensitive.
Returns:
list[DocumentRetrieverLookupResult]: The relevant documentation results found from the document retriever."""
deployment_token, deployment_id = self._get_doc_retriever_deployment_info(
document_retriever_id)
- return self.lookup_matches(deployment_token, deployment_id, query, filters, limit if limit is not None else 10, result_columns, max_words, num_retrieval_margin_words, max_words_per_chunk, score_multiplier_column, min_score)
+ return self.lookup_matches(deployment_token, deployment_id, query, filters, limit if limit is not None else 10, result_columns, max_words, num_retrieval_margin_words, max_words_per_chunk, score_multiplier_column, min_score, required_phrases)
def create_model_from_files(self, project_id: str, location: str, name: str = None, custom_artifact_filenames: dict = {}, model_config: dict = {}) -> Model:
"""Creates a new Model and returns Upload IDs for uploading the model artifacts.
@@ -4210,7 +4222,7 @@ def create_feature_group_from_function(self, table_name: str, function_source_co
package_requirements (list): List of package requirements for the feature group function. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
use_original_csv_names (bool): Defaults to False, if set it uses the original column names for input feature groups from CSV datasets.
python_function_name (str): Name of Python Function that contains the source code and function arguments.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+            python_function_bindings (list): List of PythonFunctionArgument objects that represent the bindings for the Python function.
use_gpu (bool): Whether the feature group needs a gpu or not. Otherwise default to CPU.
Returns:
@@ -4725,10 +4737,10 @@ def update_feature_group_python_function_bindings(self, feature_group_id: str, p
Args:
feature_group_id (str): The unique ID associated with the feature group.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}]."""
+            python_function_bindings (list): List of PythonFunctionArgument objects that represent the bindings for the Python function."""
return self._call_api('updateFeatureGroupPythonFunctionBindings', 'PATCH', query_params={}, body={'featureGroupId': feature_group_id, 'pythonFunctionBindings': python_function_bindings})
- def update_feature_group_python_function(self, feature_group_id: str, python_function_name: str, python_function_bindings: list = []):
+ def update_feature_group_python_function(self, feature_group_id: str, python_function_name: str, python_function_bindings: list = None, cpu_size: str = None, memory: int = None, use_gpu: bool = None):
"""Updates an existing Feature Group's python function from a user provided Python Function. If a list of feature groups are supplied within the python function
bindings, we will provide as arguments to the function DataFrame's (pandas in the case of Python) with the materialized
@@ -4738,8 +4750,11 @@ def update_feature_group_python_function(self, feature_group_id: str, python_fun
Args:
feature_group_id (str): The unique ID associated with the feature group.
python_function_name (str): The name of the python function to be associated with the feature group.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}]."""
- return self._call_api('updateFeatureGroupPythonFunction', 'PATCH', query_params={}, body={'featureGroupId': feature_group_id, 'pythonFunctionName': python_function_name, 'pythonFunctionBindings': python_function_bindings})
+            python_function_bindings (list): List of PythonFunctionArgument objects that represent the bindings for the Python function.
+ cpu_size (str): Size of the CPU for the feature group python function.
+ memory (int): Memory (in GB) for the feature group python function.
+ use_gpu (bool): Whether the feature group needs a gpu or not. Otherwise default to CPU."""
+ return self._call_api('updateFeatureGroupPythonFunction', 'PATCH', query_params={}, body={'featureGroupId': feature_group_id, 'pythonFunctionName': python_function_name, 'pythonFunctionBindings': python_function_bindings, 'cpuSize': cpu_size, 'memory': memory, 'useGpu': use_gpu})
def update_feature_group_sql_definition(self, feature_group_id: str, sql: str) -> FeatureGroup:
"""Updates the SQL statement for a feature group.
@@ -4775,7 +4790,7 @@ def update_feature_group_function_definition(self, feature_group_id: str, functi
memory (int): Memory (in GB) for the feature group function.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
use_original_csv_names (bool): If set to `True`, feature group uses the original column names for input feature groups from CSV datasets.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ python_function_bindings (list): List of PythonFunctionArgument objects that represent the bindings for the Python function.
use_gpu (bool): Whether the feature group needs a gpu or not. Otherwise default to CPU.
Returns:
@@ -5400,7 +5415,7 @@ def rename_model(self, model_id: str, name: str):
name (str): The new name to assign to the model."""
return self._call_api('renameModel', 'PATCH', query_params={}, body={'modelId': model_id, 'name': name})
- def update_python_model(self, model_id: str, function_source_code: str = None, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, initialize_function_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None, is_thread_safe: bool = None) -> Model:
+ def update_python_model(self, model_id: str, function_source_code: str = None, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, initialize_function_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None, is_thread_safe: bool = None, training_config: Union[dict, TrainingConfig] = None) -> Model:
"""Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.
This method expects `functionSourceCode` to be a valid language source file which contains the functions named `trainFunctionName` and `predictFunctionName`. `trainFunctionName` returns the ModelVersion that is the result of training the model using `trainFunctionName`. `predictFunctionName` has no well-defined return type, as it returns the prediction made by the `predictFunctionName`, which can be anything.
@@ -5419,10 +5434,11 @@ def update_python_model(self, model_id: str, function_source_code: str = None, t
package_requirements (list): List of package requirement strings. For example: `['numpy==1.2.3', 'pandas>=1.4.0']`.
use_gpu (bool): Whether this model needs gpu
is_thread_safe (bool): Whether this model is thread safe
+ training_config (TrainingConfig): The training config used to train this model.
Returns:
Model: The updated model."""
- return self._call_api('updatePythonModel', 'POST', query_params={}, body={'modelId': model_id, 'functionSourceCode': function_source_code, 'trainFunctionName': train_function_name, 'predictFunctionName': predict_function_name, 'predictManyFunctionName': predict_many_function_name, 'initializeFunctionName': initialize_function_name, 'trainingInputTables': training_input_tables, 'cpuSize': cpu_size, 'memory': memory, 'packageRequirements': package_requirements, 'useGpu': use_gpu, 'isThreadSafe': is_thread_safe}, parse_type=Model)
+ return self._call_api('updatePythonModel', 'POST', query_params={}, body={'modelId': model_id, 'functionSourceCode': function_source_code, 'trainFunctionName': train_function_name, 'predictFunctionName': predict_function_name, 'predictManyFunctionName': predict_many_function_name, 'initializeFunctionName': initialize_function_name, 'trainingInputTables': training_input_tables, 'cpuSize': cpu_size, 'memory': memory, 'packageRequirements': package_requirements, 'useGpu': use_gpu, 'isThreadSafe': is_thread_safe, 'trainingConfig': training_config}, parse_type=Model)
def update_python_model_zip(self, model_id: str, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, train_module_name: str = None, predict_module_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None) -> Upload:
"""Updates an existing Python Model using a provided zip file. If a list of input feature groups are supplied, they will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.
@@ -6640,18 +6656,19 @@ def get_assignments(self, deployment_token: str, deployment_id: str, query_data:
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'forcedAssignments': forced_assignments, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'includeAllAssignments': include_all_assignments}, server_override=prediction_url)
- def get_alternative_assignments(self, deployment_token: str, deployment_id: str, query_data: dict, add_constraints: list = None, solve_time_limit_seconds: float = None) -> Dict:
+ def get_alternative_assignments(self, deployment_token: str, deployment_id: str, query_data: dict, add_constraints: list = None, solve_time_limit_seconds: float = None, best_alternate_only: bool = False) -> Dict:
"""Get alternative positive assignments for given query. Optimal assignments are ignored and the alternative assignments are returned instead.
Args:
deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
deployment_id (str): The unique identifier of a deployment created under the project.
query_data (dict): Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
- add_constraints (list): List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
- solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query."""
+            add_constraints (list): List of constraint dicts to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
+ solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query.
+            best_alternate_only (bool): When True, only the best alternate will be returned; when False, multiple alternates are returned"""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds}, server_override=prediction_url)
+ return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'bestAlternateOnly': best_alternate_only}, server_override=prediction_url)
def check_constraints(self, deployment_token: str, deployment_id: str, query_data: dict) -> Dict:
"""Check for any constraints violated by the overrides.
@@ -6821,7 +6838,7 @@ def execute_conversation_agent(self, deployment_token: str, deployment_id: str,
deployment_id, deployment_token) if deployment_token else None
return self._call_api('executeConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos}, server_override=prediction_url)
- def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None) -> List[DocumentRetrieverLookupResult]:
+ def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None) -> List[DocumentRetrieverLookupResult]:
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
Original documents are splitted into chunks and stored in the document retriever. This lookup function will return the relevant chunks
@@ -6841,12 +6858,13 @@ def lookup_matches(self, deployment_token: str, deployment_id: str, data: str =
max_words_per_chunk (int): If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e, chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
score_multiplier_column (str): If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
min_score (float): If provided, will filter out the results with score less than the value specified.
+            required_phrases (list): If provided, each result will contain at least one of the phrases in the given list. Matching is case- and whitespace-insensitive.
Returns:
list[DocumentRetrieverLookupResult]: The relevant documentation results found from the document retriever."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('lookupMatches', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'data': data, 'filters': filters, 'num': num, 'resultColumns': result_columns, 'maxWords': max_words, 'numRetrievalMarginWords': num_retrieval_margin_words, 'maxWordsPerChunk': max_words_per_chunk, 'scoreMultiplierColumn': score_multiplier_column, 'minScore': min_score}, parse_type=DocumentRetrieverLookupResult, server_override=prediction_url)
+ return self._call_api('lookupMatches', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'data': data, 'filters': filters, 'num': num, 'resultColumns': result_columns, 'maxWords': max_words, 'numRetrievalMarginWords': num_retrieval_margin_words, 'maxWordsPerChunk': max_words_per_chunk, 'scoreMultiplierColumn': score_multiplier_column, 'minScore': min_score, 'requiredPhrases': required_phrases}, parse_type=DocumentRetrieverLookupResult, server_override=prediction_url)
def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, blobs: None = None) -> Dict:
"""Executes a deployed AI agent function with binary data as inputs.
@@ -7084,17 +7102,6 @@ def delete_data(self, feature_group_id: str, primary_key: str):
primary_key (str): The primary key value for which to delete the feature group row"""
return self._call_api('deleteData', 'DELETE', query_params={'featureGroupId': feature_group_id, 'primaryKey': primary_key})
- def add_feature_group_document(self, feature_group_id: str, document: io.TextIOBase) -> FeatureGroupDocument:
- """Adds a document to the feature group.
-
- Args:
- feature_group_id (str): The unique ID associated with the feature group.
- document (io.TextIOBase): The multipart/form-data of the document to add to the feature group.
-
- Returns:
- FeatureGroupDocument: The feature group document that was added."""
- return self._call_api('addFeatureGroupDocument', 'PUT', query_params={'featureGroupId': feature_group_id}, parse_type=FeatureGroupDocument, files={'document': document})
-
def describe_feature_group_row_process_by_key(self, deployment_id: str, primary_key_value: str) -> FeatureGroupRowProcess:
"""Gets the feature group row process.
@@ -7294,7 +7301,7 @@ def create_pipeline_step(self, pipeline_id: str, step_name: str, function_name:
function_name (str): The name of the Python function.
source_code (str): Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
step_input_mappings (list): List of Python function arguments.
- output_variable_mappings (list): List of Python function ouputs.
+ output_variable_mappings (list): List of Python function outputs.
step_dependencies (list): List of step names this step depends on.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
cpu_size (str): Size of the CPU for the step function.
@@ -7320,7 +7327,7 @@ def update_pipeline_step(self, pipeline_step_id: str, function_name: str = None,
function_name (str): The name of the Python function.
source_code (str): Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
step_input_mappings (list): List of Python function arguments.
- output_variable_mappings (list): List of Python function ouputs.
+ output_variable_mappings (list): List of Python function outputs.
step_dependencies (list): List of step names this step depends on.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
cpu_size (str): Size of the CPU for the step function.
@@ -7858,7 +7865,7 @@ def delete_external_application(self, external_application_id: str):
external_application_id (str): The ID of the External Application."""
return self._call_api('deleteExternalApplication', 'DELETE', query_params={'externalApplicationId': external_application_id})
- def create_agent(self, project_id: str, function_source_code: str, agent_function_name: str, name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, evaluation_feature_group_id: str = None, agent_input_schema: dict = None, agent_output_schema: dict = None) -> Agent:
+ def create_agent(self, project_id: str, function_source_code: str = None, agent_function_name: str = None, name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, evaluation_feature_group_id: str = None, agent_input_schema: dict = None, agent_output_schema: dict = None, workflow_graph: Union[dict, WorkflowGraph] = None) -> Agent:
"""Creates a new AI agent.
Args:
@@ -7873,12 +7880,13 @@ def create_agent(self, project_id: str, function_source_code: str, agent_functio
evaluation_feature_group_id (str): The ID of the feature group to use for evaluation.
agent_input_schema (dict): The schema of the input data for the agent, which conforms to the react-json-schema-form standard.
agent_output_schema (dict): The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
+ workflow_graph (WorkflowGraph): The workflow graph for the agent.
Returns:
Agent: The new agent"""
- return self._call_api('createAgent', 'POST', query_params={}, body={'projectId': project_id, 'functionSourceCode': function_source_code, 'agentFunctionName': agent_function_name, 'name': name, 'memory': memory, 'packageRequirements': package_requirements, 'description': description, 'enableBinaryInput': enable_binary_input, 'evaluationFeatureGroupId': evaluation_feature_group_id, 'agentInputSchema': agent_input_schema, 'agentOutputSchema': agent_output_schema}, parse_type=Agent)
+ return self._call_api('createAgent', 'POST', query_params={}, body={'projectId': project_id, 'functionSourceCode': function_source_code, 'agentFunctionName': agent_function_name, 'name': name, 'memory': memory, 'packageRequirements': package_requirements, 'description': description, 'enableBinaryInput': enable_binary_input, 'evaluationFeatureGroupId': evaluation_feature_group_id, 'agentInputSchema': agent_input_schema, 'agentOutputSchema': agent_output_schema, 'workflowGraph': workflow_graph}, parse_type=Agent)
- def update_agent(self, model_id: str, function_source_code: str = None, agent_function_name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, agent_input_schema: dict = None, agent_output_schema: dict = None) -> Agent:
+ def update_agent(self, model_id: str, function_source_code: str = None, agent_function_name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, agent_input_schema: dict = None, agent_output_schema: dict = None, workflow_graph: Union[dict, WorkflowGraph] = None) -> Agent:
"""Updates an existing AI Agent using user-provided Python code. A new version of the agent will be created and published.
Args:
@@ -7891,10 +7899,11 @@ def update_agent(self, model_id: str, function_source_code: str = None, agent_fu
enable_binary_input (bool): If True, the agent will be able to accept binary data as inputs.
agent_input_schema (dict): The schema of the input data for the agent, which conforms to the react-json-schema-form standard.
agent_output_schema (dict): The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
+ workflow_graph (WorkflowGraph): The workflow graph for the agent.
Returns:
Agent: The updated agent"""
- return self._call_api('updateAgent', 'POST', query_params={}, body={'modelId': model_id, 'functionSourceCode': function_source_code, 'agentFunctionName': agent_function_name, 'memory': memory, 'packageRequirements': package_requirements, 'description': description, 'enableBinaryInput': enable_binary_input, 'agentInputSchema': agent_input_schema, 'agentOutputSchema': agent_output_schema}, parse_type=Agent)
+ return self._call_api('updateAgent', 'POST', query_params={}, body={'modelId': model_id, 'functionSourceCode': function_source_code, 'agentFunctionName': agent_function_name, 'memory': memory, 'packageRequirements': package_requirements, 'description': description, 'enableBinaryInput': enable_binary_input, 'agentInputSchema': agent_input_schema, 'agentOutputSchema': agent_output_schema, 'workflowGraph': workflow_graph}, parse_type=Agent)
def evaluate_prompt(self, prompt: str = None, system_message: str = None, llm_name: Union[LLMName, str] = None, max_tokens: int = None, temperature: float = 0.0, messages: list = None, response_type: str = None, json_response_schema: dict = None) -> LlmResponse:
"""Generate response to the prompt using the specified model.
diff --git a/abacusai/deployment_conversation_event.py b/abacusai/deployment_conversation_event.py
index 4c96ae0e6..9966ed435 100644
--- a/abacusai/deployment_conversation_event.py
+++ b/abacusai/deployment_conversation_event.py
@@ -22,13 +22,14 @@ class DeploymentConversationEvent(AbstractApiClass):
inputParams (dict): User message only. A dictionary of input parameters used to generate response.
attachments (list): A list of attachments associated with the message.
responseVersion (str): The version of the response, used to differentiate w/ legacy agent response.
+ agentWorkflowNodeKey (str): The workflow node key associated with the agent response.
chatType (str): The type of chat llm that was run for the message.
agentResponse (dict): Response from the agent. Only for conversation with agents.
error (str): The error message in case of an error.
segments (list): The segments of the message.
"""
- def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, chatType=None, agentResponse=None, error=None, segments=None):
+ def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeKey=None, chatType=None, agentResponse=None, error=None, segments=None):
super().__init__(client, None)
self.role = role
self.text = text
@@ -45,6 +46,7 @@ def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=No
self.input_params = inputParams
self.attachments = attachments
self.response_version = responseVersion
+ self.agent_workflow_node_key = agentWorkflowNodeKey
self.chat_type = chatType
self.agent_response = agentResponse
self.error = error
@@ -52,8 +54,8 @@ def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=No
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'message_index': repr(self.message_index), f'regenerate_attempt': repr(self.regenerate_attempt), f'model_version': repr(self.model_version), f'search_results': repr(self.search_results), f'is_useful': repr(self.is_useful), f'feedback': repr(self.feedback), f'feedback_type': repr(
- self.feedback_type), f'doc_infos': repr(self.doc_infos), f'keyword_arguments': repr(self.keyword_arguments), f'input_params': repr(self.input_params), f'attachments': repr(self.attachments), f'response_version': repr(self.response_version), f'chat_type': repr(self.chat_type), f'agent_response': repr(self.agent_response), f'error': repr(self.error), f'segments': repr(self.segments)}
+ repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'message_index': repr(self.message_index), f'regenerate_attempt': repr(self.regenerate_attempt), f'model_version': repr(self.model_version), f'search_results': repr(self.search_results), f'is_useful': repr(self.is_useful), f'feedback': repr(self.feedback), f'feedback_type': repr(self.feedback_type), f'doc_infos': repr(
+ self.doc_infos), f'keyword_arguments': repr(self.keyword_arguments), f'input_params': repr(self.input_params), f'attachments': repr(self.attachments), f'response_version': repr(self.response_version), f'agent_workflow_node_key': repr(self.agent_workflow_node_key), f'chat_type': repr(self.chat_type), f'agent_response': repr(self.agent_response), f'error': repr(self.error), f'segments': repr(self.segments)}
class_name = "DeploymentConversationEvent"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -66,6 +68,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'message_index': self.message_index, 'regenerate_attempt': self.regenerate_attempt, 'model_version': self.model_version, 'search_results': self.search_results, 'is_useful': self.is_useful, 'feedback': self.feedback, 'feedback_type': self.feedback_type,
- 'doc_infos': self.doc_infos, 'keyword_arguments': self.keyword_arguments, 'input_params': self.input_params, 'attachments': self.attachments, 'response_version': self.response_version, 'chat_type': self.chat_type, 'agent_response': self.agent_response, 'error': self.error, 'segments': self.segments}
+ resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'message_index': self.message_index, 'regenerate_attempt': self.regenerate_attempt, 'model_version': self.model_version, 'search_results': self.search_results, 'is_useful': self.is_useful, 'feedback': self.feedback, 'feedback_type': self.feedback_type, 'doc_infos': self.doc_infos,
+ 'keyword_arguments': self.keyword_arguments, 'input_params': self.input_params, 'attachments': self.attachments, 'response_version': self.response_version, 'agent_workflow_node_key': self.agent_workflow_node_key, 'chat_type': self.chat_type, 'agent_response': self.agent_response, 'error': self.error, 'segments': self.segments}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/document_retriever.py b/abacusai/document_retriever.py
index 8dbc52a14..65fe14ec8 100644
--- a/abacusai/document_retriever.py
+++ b/abacusai/document_retriever.py
@@ -206,11 +206,11 @@ def get_deployment_status(self):
"""
return self.describe().latest_document_retriever_version.deployment_status
- def get_matching_documents(self, query: str, filters: dict = None, limit: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None):
+ def get_matching_documents(self, query: str, filters: dict = None, limit: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None):
"""
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
- Original documents are splitted into chunks and stored in the document retriever. This lookup function will return the relevant chunks
+ Original documents are split into chunks and stored in the document retriever. This lookup function will return the relevant chunks
from the document retriever. The returned chunks could be expanded to include more words from the original documents and merged if they
are overlapping, and permitted by the settings provided. The returned chunks are sorted by relevance.
@@ -224,8 +224,10 @@ def get_matching_documents(self, query: str, filters: dict = None, limit: int =
num_retrieval_margin_words (int): If provided, will add this number of words from left and right of the returned chunks.
max_words_per_chunk (int): If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e, chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
score_multiplier_column (str): If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
+ min_score (float): If provided, will filter out the results with score lower than the value specified.
+            required_phrases (list): If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace and case insensitive.
Returns:
list[DocumentRetrieverLookupResult]: The relevant documentation results found from the document retriever.
"""
- return self.client.get_matching_documents(self.document_retriever_id, query, filters, limit, result_columns, max_words, num_retrieval_margin_words, max_words_per_chunk, score_multiplier_column)
+ return self.client.get_matching_documents(self.document_retriever_id, query, filters, limit, result_columns, max_words, num_retrieval_margin_words, max_words_per_chunk, score_multiplier_column, min_score, required_phrases)
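
A minimal sketch of the two new lookup filters, assuming an authenticated `ApiClient` as in the earlier sketch; the retriever ID, query, and phrase are placeholders.

```python
retriever = client.describe_document_retriever("your_document_retriever_id")
results = retriever.get_matching_documents(
    query="how do I rotate my API key?",
    limit=5,
    min_score=0.5,                 # drop chunks scoring below 0.5
    required_phrases=["API key"],  # every returned chunk must contain this phrase
)
for result in results:
    print(result)
```
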
diff --git a/abacusai/drift_distributions.py b/abacusai/drift_distributions.py
index f106b2a84..827bf92fa 100644
--- a/abacusai/drift_distributions.py
+++ b/abacusai/drift_distributions.py
@@ -10,18 +10,21 @@ class DriftDistributions(AbstractApiClass):
client (ApiClient): An authenticated API Client instance
labelDrift (DriftDistribution): A DriftDistribution describing column names and the range of values for label drift.
predictionDrift (DriftDistribution): A DriftDistribution describing column names and the range of values for prediction drift.
+        bpPredictionDrift (DriftDistribution): A DriftDistribution describing column names and the range of values for prediction drift, when the predictions come from batch prediction (BP).
"""
- def __init__(self, client, labelDrift={}, predictionDrift={}):
+ def __init__(self, client, labelDrift={}, predictionDrift={}, bpPredictionDrift={}):
super().__init__(client, None)
self.label_drift = client._build_class(DriftDistribution, labelDrift)
self.prediction_drift = client._build_class(
DriftDistribution, predictionDrift)
+ self.bp_prediction_drift = client._build_class(
+ DriftDistribution, bpPredictionDrift)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'label_drift': repr(
- self.label_drift), f'prediction_drift': repr(self.prediction_drift)}
+ repr_dict = {f'label_drift': repr(self.label_drift), f'prediction_drift': repr(
+ self.prediction_drift), f'bp_prediction_drift': repr(self.bp_prediction_drift)}
class_name = "DriftDistributions"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -34,6 +37,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'label_drift': self._get_attribute_as_dict(
- self.label_drift), 'prediction_drift': self._get_attribute_as_dict(self.prediction_drift)}
+ resp = {'label_drift': self._get_attribute_as_dict(self.label_drift), 'prediction_drift': self._get_attribute_as_dict(
+ self.prediction_drift), 'bp_prediction_drift': self._get_attribute_as_dict(self.bp_prediction_drift)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/feature_group.py b/abacusai/feature_group.py
index 21c19ac27..712aa1256 100644
--- a/abacusai/feature_group.py
+++ b/abacusai/feature_group.py
@@ -790,11 +790,11 @@ def update_python_function_bindings(self, python_function_bindings: list):
Updates an existing Feature Group's Python function bindings from a user-provided Python Function. If a list of feature groups are supplied within the Python function bindings, we will provide DataFrames (Pandas in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.
Args:
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ python_function_bindings (list): List of python function arguments.
"""
return self.client.update_feature_group_python_function_bindings(self.feature_group_id, python_function_bindings)
- def update_python_function(self, python_function_name: str, python_function_bindings: list = []):
+ def update_python_function(self, python_function_name: str, python_function_bindings: list = None, cpu_size: str = None, memory: int = None, use_gpu: bool = None):
"""
Updates an existing Feature Group's python function from a user provided Python Function. If a list of feature groups are supplied within the python function
@@ -804,9 +804,12 @@ def update_python_function(self, python_function_name: str, python_function_bind
Args:
python_function_name (str): The name of the python function to be associated with the feature group.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ python_function_bindings (list): List of python function arguments.
+ cpu_size (str): Size of the CPU for the feature group python function.
+ memory (int): Memory (in GB) for the feature group python function.
+            use_gpu (bool): Whether the feature group needs a GPU. Defaults to CPU if not set.
"""
- return self.client.update_feature_group_python_function(self.feature_group_id, python_function_name, python_function_bindings)
+ return self.client.update_feature_group_python_function(self.feature_group_id, python_function_name, python_function_bindings, cpu_size, memory, use_gpu)
def update_sql_definition(self, sql: str):
"""
@@ -844,7 +847,7 @@ def update_function_definition(self, function_source_code: str = None, function_
memory (int): Memory (in GB) for the feature group function.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
use_original_csv_names (bool): If set to `True`, feature group uses the original column names for input feature groups from CSV datasets.
- python_function_bindings (list): List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ python_function_bindings (list): List of PythonFunctionArgument objects that represent the bindings for the Python function.
use_gpu (bool): Whether the feature group needs a gpu or not. Otherwise default to CPU.
Returns:
@@ -1088,30 +1091,6 @@ def get_data(self, primary_key: str = None, num_rows: int = None):
"""
return self.client.get_data(self.feature_group_id, primary_key, num_rows)
- def add_document(self, document: io.TextIOBase):
- """
- Adds a document to the feature group.
-
- Args:
- document (io.TextIOBase): The multipart/form-data of the document to add to the feature group.
-
- Returns:
- FeatureGroupDocument: The feature group document that was added.
- """
- return self.client.add_feature_group_document(self.feature_group_id, document)
-
- def list_pending_documents(self):
- """
- Lists all pending documents added to feature group.
-
- Args:
- feature_group_id (str): The unique ID associated with the feature group.
-
- Returns:
- list[FeatureGroupDocument]: A list of pending feature group documents.
- """
- return self.client.list_pending_feature_group_documents(self.feature_group_id)
-
def get_natural_language_explanation(self, feature_group_version: str = None, model_id: str = None):
"""
Returns the saved natural language explanation of an artifact with given ID. The artifact can be - Feature Group or Feature Group Version or Model
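
A hedged sketch of the widened `update_python_function` signature; the feature group ID, function name, and resource values are assumptions (the valid `cpu_size` labels are whatever the platform accepts).

```python
fg = client.describe_feature_group("your_feature_group_id")  # placeholder ID
fg.update_python_function(
    python_function_name="my_transform",  # placeholder function name
    cpu_size="medium",                    # assumed size label
    memory=16,                            # in GB, per the docstring above
    use_gpu=False,                        # default to CPU
)
```
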
diff --git a/abacusai/feature_group_version.py b/abacusai/feature_group_version.py
index bdd958a3e..e2717ce6b 100644
--- a/abacusai/feature_group_version.py
+++ b/abacusai/feature_group_version.py
@@ -272,11 +272,11 @@ def get_docstore_resource_bytes(feature_group_version, resource_type, archive_id
return bytes
def get_document_processing_result_infos(content_hash_list, document_processing_config, document_processing_version=None):
- return self.client._call_api('_getDocumentProcessingResultInfos', 'POST',
- body={'contentHashList': content_hash_list,
- 'documentProcessingConfig': document_processing_config,
- 'documentProcessingVersion': document_processing_version},
- retry_500=True)
+ return self.client._proxy_request('_getDocumentProcessingResultInfos', 'POST',
+ body={'contentHashList': content_hash_list,
+ 'documentProcessingConfig': document_processing_config,
+ 'documentProcessingVersion': document_processing_version},
+ is_sync=True)
feature_group_version = self.id
df = self.load_as_pandas(max_workers=max_workers)
diff --git a/abacusai/feature_performance_analysis.py b/abacusai/feature_performance_analysis.py
new file mode 100644
index 000000000..f846fc998
--- /dev/null
+++ b/abacusai/feature_performance_analysis.py
@@ -0,0 +1,39 @@
+from .return_class import AbstractApiClass
+
+
+class FeaturePerformanceAnalysis(AbstractApiClass):
+ """
+ A feature performance analysis for Monitor
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ features (list): A list of the features that are being analyzed.
+        featureMetrics (list): A list of dictionaries, one per feature, containing that feature's metrics.
+ metricsKeys (list): A list of the keys for the metrics.
+ """
+
+ def __init__(self, client, features=None, featureMetrics=None, metricsKeys=None):
+ super().__init__(client, None)
+ self.features = features
+ self.feature_metrics = featureMetrics
+ self.metrics_keys = metricsKeys
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'features': repr(self.features), f'feature_metrics': repr(
+ self.feature_metrics), f'metrics_keys': repr(self.metrics_keys)}
+ class_name = "FeaturePerformanceAnalysis"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'features': self.features, 'feature_metrics': self.feature_metrics,
+ 'metrics_keys': self.metrics_keys}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
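
Response classes like this are normally instantiated by the client from API responses; below is a standalone sketch of the shape, with made-up values and `client=None` used only so the example runs on its own.

```python
from abacusai.feature_performance_analysis import FeaturePerformanceAnalysis

analysis = FeaturePerformanceAnalysis(
    client=None,                                       # stand-in for an ApiClient
    features=["age", "income"],
    featureMetrics=[{"feature": "age", "psi": 0.02}],  # made-up metric values
    metricsKeys=["psi"],
)
print(analysis.to_dict())  # None-valued keys are dropped, per to_dict above
```
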
diff --git a/abacusai/messaging_connector_response.py b/abacusai/messaging_connector_response.py
index 0a1a67997..84447c0de 100644
--- a/abacusai/messaging_connector_response.py
+++ b/abacusai/messaging_connector_response.py
@@ -13,9 +13,10 @@ class MessagingConnectorResponse(AbstractApiClass):
messagingBotName (str): the name you want to see at various places instead of Abacus.ai
useDefaultLabel (bool): to use the default abacus.ai label in case it is set to true
initAckReq (bool): Set to true if the initial Acknowledgment for the query is required by the user
+        defaultLabels (dict): Dictionary of default labels to use if the user-specified labels are not set
"""
- def __init__(self, client, welcomeMessage=None, defaultMessage=None, disclaimer=None, messagingBotName=None, useDefaultLabel=None, initAckReq=None):
+ def __init__(self, client, welcomeMessage=None, defaultMessage=None, disclaimer=None, messagingBotName=None, useDefaultLabel=None, initAckReq=None, defaultLabels=None):
super().__init__(client, None)
self.welcome_message = welcomeMessage
self.default_message = defaultMessage
@@ -23,11 +24,12 @@ def __init__(self, client, welcomeMessage=None, defaultMessage=None, disclaimer=
self.messaging_bot_name = messagingBotName
self.use_default_label = useDefaultLabel
self.init_ack_req = initAckReq
+ self.default_labels = defaultLabels
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'welcome_message': repr(self.welcome_message), f'default_message': repr(self.default_message), f'disclaimer': repr(
- self.disclaimer), f'messaging_bot_name': repr(self.messaging_bot_name), f'use_default_label': repr(self.use_default_label), f'init_ack_req': repr(self.init_ack_req)}
+ repr_dict = {f'welcome_message': repr(self.welcome_message), f'default_message': repr(self.default_message), f'disclaimer': repr(self.disclaimer), f'messaging_bot_name': repr(
+ self.messaging_bot_name), f'use_default_label': repr(self.use_default_label), f'init_ack_req': repr(self.init_ack_req), f'default_labels': repr(self.default_labels)}
class_name = "MessagingConnectorResponse"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -40,6 +42,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'welcome_message': self.welcome_message, 'default_message': self.default_message, 'disclaimer': self.disclaimer,
- 'messaging_bot_name': self.messaging_bot_name, 'use_default_label': self.use_default_label, 'init_ack_req': self.init_ack_req}
+ resp = {'welcome_message': self.welcome_message, 'default_message': self.default_message, 'disclaimer': self.disclaimer, 'messaging_bot_name':
+ self.messaging_bot_name, 'use_default_label': self.use_default_label, 'init_ack_req': self.init_ack_req, 'default_labels': self.default_labels}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/model.py b/abacusai/model.py
index 17241a294..4fc44ab69 100644
--- a/abacusai/model.py
+++ b/abacusai/model.py
@@ -1,6 +1,6 @@
from typing import Union
-from .api_class import TrainingConfig
+from .api_class import TrainingConfig, WorkflowGraph
from .code_source import CodeSource
from .database_connector import DatabaseConnector
from .model_location import ModelLocation
@@ -153,7 +153,7 @@ def rename(self, name: str):
"""
return self.client.rename_model(self.model_id, name)
- def update_python(self, function_source_code: str = None, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, initialize_function_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None, is_thread_safe: bool = None):
+ def update_python(self, function_source_code: str = None, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, initialize_function_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None, is_thread_safe: bool = None, training_config: Union[dict, TrainingConfig] = None):
"""
Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.
@@ -172,11 +172,12 @@ def update_python(self, function_source_code: str = None, train_function_name: s
package_requirements (list): List of package requirement strings. For example: `['numpy==1.2.3', 'pandas>=1.4.0']`.
use_gpu (bool): Whether this model needs gpu
is_thread_safe (bool): Whether this model is thread safe
+ training_config (TrainingConfig): The training config used to train this model.
Returns:
Model: The updated model.
"""
- return self.client.update_python_model(self.model_id, function_source_code, train_function_name, predict_function_name, predict_many_function_name, initialize_function_name, training_input_tables, cpu_size, memory, package_requirements, use_gpu, is_thread_safe)
+ return self.client.update_python_model(self.model_id, function_source_code, train_function_name, predict_function_name, predict_many_function_name, initialize_function_name, training_input_tables, cpu_size, memory, package_requirements, use_gpu, is_thread_safe, training_config)
def update_python_zip(self, train_function_name: str = None, predict_function_name: str = None, predict_many_function_name: str = None, train_module_name: str = None, predict_module_name: str = None, training_input_tables: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, use_gpu: bool = None):
"""
@@ -347,7 +348,7 @@ def get_training_types_for_deployment(self, model_version: str = None, algorithm
"""
return self.client.get_model_training_types_for_deployment(self.model_id, model_version, algorithm)
- def update_agent(self, function_source_code: str = None, agent_function_name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, agent_input_schema: dict = None, agent_output_schema: dict = None):
+ def update_agent(self, function_source_code: str = None, agent_function_name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, agent_input_schema: dict = None, agent_output_schema: dict = None, workflow_graph: Union[dict, WorkflowGraph] = None):
"""
Updates an existing AI Agent using user-provided Python code. A new version of the agent will be created and published.
@@ -360,11 +361,12 @@ def update_agent(self, function_source_code: str = None, agent_function_name: st
enable_binary_input (bool): If True, the agent will be able to accept binary data as inputs.
agent_input_schema (dict): The schema of the input data for the agent, which conforms to the react-json-schema-form standard.
agent_output_schema (dict): The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
+ workflow_graph (WorkflowGraph): The workflow graph for the agent.
Returns:
Agent: The updated agent
"""
- return self.client.update_agent(self.model_id, function_source_code, agent_function_name, memory, package_requirements, description, enable_binary_input, agent_input_schema, agent_output_schema)
+ return self.client.update_agent(self.model_id, function_source_code, agent_function_name, memory, package_requirements, description, enable_binary_input, agent_input_schema, agent_output_schema, workflow_graph)
def wait_for_training(self, timeout=None):
"""
diff --git a/abacusai/model_version.py b/abacusai/model_version.py
index b86809d88..45bfd272f 100644
--- a/abacusai/model_version.py
+++ b/abacusai/model_version.py
@@ -37,10 +37,11 @@ class ModelVersion(AbstractApiClass):
useGpu (bool): Whether this model version is using gpu
partialComplete (bool): If true, all required algorithms have completed training.
modelFeatureGroupSchemaMappings (dict): mapping of feature group to schema version
+        trainingConfigUpdated (bool): Whether the training config has been updated since the instance was created.
codeSource (CodeSource): If a python model, information on where the source code is located.
"""
- def __init__(self, client, modelVersion=None, status=None, modelId=None, modelPredictionConfig=None, trainingStartedAt=None, trainingCompletedAt=None, featureGroupVersions=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, cpuSize=None, memory=None, automlComplete=None, trainingFeatureGroupIds=None, trainingDocumentRetrieverVersions=None, documentRetrieverMappings=None, bestAlgorithm=None, defaultAlgorithm=None, featureAnalysisStatus=None, dataClusterInfo=None, customAlgorithmConfigs=None, trainedModelTypes=None, useGpu=None, partialComplete=None, modelFeatureGroupSchemaMappings=None, codeSource={}, modelConfig={}, deployableAlgorithms={}):
+ def __init__(self, client, modelVersion=None, status=None, modelId=None, modelPredictionConfig=None, trainingStartedAt=None, trainingCompletedAt=None, featureGroupVersions=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, cpuSize=None, memory=None, automlComplete=None, trainingFeatureGroupIds=None, trainingDocumentRetrieverVersions=None, documentRetrieverMappings=None, bestAlgorithm=None, defaultAlgorithm=None, featureAnalysisStatus=None, dataClusterInfo=None, customAlgorithmConfigs=None, trainedModelTypes=None, useGpu=None, partialComplete=None, modelFeatureGroupSchemaMappings=None, trainingConfigUpdated=None, codeSource={}, modelConfig={}, deployableAlgorithms={}):
super().__init__(client, modelVersion)
self.model_version = modelVersion
self.status = status
@@ -67,6 +68,7 @@ def __init__(self, client, modelVersion=None, status=None, modelId=None, modelPr
self.use_gpu = useGpu
self.partial_complete = partialComplete
self.model_feature_group_schema_mappings = modelFeatureGroupSchemaMappings
+ self.training_config_updated = trainingConfigUpdated
self.code_source = client._build_class(CodeSource, codeSource)
self.model_config = client._build_class(TrainingConfig, modelConfig)
self.deployable_algorithms = client._build_class(
@@ -74,8 +76,8 @@ def __init__(self, client, modelVersion=None, status=None, modelId=None, modelPr
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'model_version': repr(self.model_version), f'status': repr(self.status), f'model_id': repr(self.model_id), f'model_prediction_config': repr(self.model_prediction_config), f'training_started_at': repr(self.training_started_at), f'training_completed_at': repr(self.training_completed_at), f'feature_group_versions': repr(self.feature_group_versions), f'error': repr(self.error), f'pending_deployment_ids': repr(self.pending_deployment_ids), f'failed_deployment_ids': repr(self.failed_deployment_ids), f'cpu_size': repr(self.cpu_size), f'memory': repr(self.memory), f'automl_complete': repr(self.automl_complete), f'training_feature_group_ids': repr(self.training_feature_group_ids), f'training_document_retriever_versions': repr(
- self.training_document_retriever_versions), f'document_retriever_mappings': repr(self.document_retriever_mappings), f'best_algorithm': repr(self.best_algorithm), f'default_algorithm': repr(self.default_algorithm), f'feature_analysis_status': repr(self.feature_analysis_status), f'data_cluster_info': repr(self.data_cluster_info), f'custom_algorithm_configs': repr(self.custom_algorithm_configs), f'trained_model_types': repr(self.trained_model_types), f'use_gpu': repr(self.use_gpu), f'partial_complete': repr(self.partial_complete), f'model_feature_group_schema_mappings': repr(self.model_feature_group_schema_mappings), f'code_source': repr(self.code_source), f'model_config': repr(self.model_config), f'deployable_algorithms': repr(self.deployable_algorithms)}
+ repr_dict = {f'model_version': repr(self.model_version), f'status': repr(self.status), f'model_id': repr(self.model_id), f'model_prediction_config': repr(self.model_prediction_config), f'training_started_at': repr(self.training_started_at), f'training_completed_at': repr(self.training_completed_at), f'feature_group_versions': repr(self.feature_group_versions), f'error': repr(self.error), f'pending_deployment_ids': repr(self.pending_deployment_ids), f'failed_deployment_ids': repr(self.failed_deployment_ids), f'cpu_size': repr(self.cpu_size), f'memory': repr(self.memory), f'automl_complete': repr(self.automl_complete), f'training_feature_group_ids': repr(self.training_feature_group_ids), f'training_document_retriever_versions': repr(self.training_document_retriever_versions),
+ f'document_retriever_mappings': repr(self.document_retriever_mappings), f'best_algorithm': repr(self.best_algorithm), f'default_algorithm': repr(self.default_algorithm), f'feature_analysis_status': repr(self.feature_analysis_status), f'data_cluster_info': repr(self.data_cluster_info), f'custom_algorithm_configs': repr(self.custom_algorithm_configs), f'trained_model_types': repr(self.trained_model_types), f'use_gpu': repr(self.use_gpu), f'partial_complete': repr(self.partial_complete), f'model_feature_group_schema_mappings': repr(self.model_feature_group_schema_mappings), f'training_config_updated': repr(self.training_config_updated), f'code_source': repr(self.code_source), f'model_config': repr(self.model_config), f'deployable_algorithms': repr(self.deployable_algorithms)}
class_name = "ModelVersion"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -88,8 +90,8 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'model_version': self.model_version, 'status': self.status, 'model_id': self.model_id, 'model_prediction_config': self.model_prediction_config, 'training_started_at': self.training_started_at, 'training_completed_at': self.training_completed_at, 'feature_group_versions': self.feature_group_versions, 'error': self.error, 'pending_deployment_ids': self.pending_deployment_ids, 'failed_deployment_ids': self.failed_deployment_ids, 'cpu_size': self.cpu_size, 'memory': self.memory, 'automl_complete': self.automl_complete, 'training_feature_group_ids': self.training_feature_group_ids, 'training_document_retriever_versions': self.training_document_retriever_versions,
- 'document_retriever_mappings': self.document_retriever_mappings, 'best_algorithm': self.best_algorithm, 'default_algorithm': self.default_algorithm, 'feature_analysis_status': self.feature_analysis_status, 'data_cluster_info': self.data_cluster_info, 'custom_algorithm_configs': self.custom_algorithm_configs, 'trained_model_types': self.trained_model_types, 'use_gpu': self.use_gpu, 'partial_complete': self.partial_complete, 'model_feature_group_schema_mappings': self.model_feature_group_schema_mappings, 'code_source': self._get_attribute_as_dict(self.code_source), 'model_config': self._get_attribute_as_dict(self.model_config), 'deployable_algorithms': self._get_attribute_as_dict(self.deployable_algorithms)}
+ resp = {'model_version': self.model_version, 'status': self.status, 'model_id': self.model_id, 'model_prediction_config': self.model_prediction_config, 'training_started_at': self.training_started_at, 'training_completed_at': self.training_completed_at, 'feature_group_versions': self.feature_group_versions, 'error': self.error, 'pending_deployment_ids': self.pending_deployment_ids, 'failed_deployment_ids': self.failed_deployment_ids, 'cpu_size': self.cpu_size, 'memory': self.memory, 'automl_complete': self.automl_complete, 'training_feature_group_ids': self.training_feature_group_ids, 'training_document_retriever_versions': self.training_document_retriever_versions, 'document_retriever_mappings': self.document_retriever_mappings,
+ 'best_algorithm': self.best_algorithm, 'default_algorithm': self.default_algorithm, 'feature_analysis_status': self.feature_analysis_status, 'data_cluster_info': self.data_cluster_info, 'custom_algorithm_configs': self.custom_algorithm_configs, 'trained_model_types': self.trained_model_types, 'use_gpu': self.use_gpu, 'partial_complete': self.partial_complete, 'model_feature_group_schema_mappings': self.model_feature_group_schema_mappings, 'training_config_updated': self.training_config_updated, 'code_source': self._get_attribute_as_dict(self.code_source), 'model_config': self._get_attribute_as_dict(self.model_config), 'deployable_algorithms': self._get_attribute_as_dict(self.deployable_algorithms)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
def describe_train_test_data_split_feature_group_version(self):
diff --git a/abacusai/pipeline.py b/abacusai/pipeline.py
index 6bb5f7b9d..d38f3a4ae 100644
--- a/abacusai/pipeline.py
+++ b/abacusai/pipeline.py
@@ -158,7 +158,7 @@ def create_step(self, step_name: str, function_name: str = None, source_code: st
function_name (str): The name of the Python function.
source_code (str): Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
step_input_mappings (list): List of Python function arguments.
- output_variable_mappings (list): List of Python function ouputs.
+ output_variable_mappings (list): List of Python function outputs.
step_dependencies (list): List of step names this step depends on.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
cpu_size (str): Size of the CPU for the step function.
diff --git a/abacusai/pipeline_step.py b/abacusai/pipeline_step.py
index 7e760208d..9934e7bf7 100644
--- a/abacusai/pipeline_step.py
+++ b/abacusai/pipeline_step.py
@@ -78,7 +78,7 @@ def update(self, function_name: str = None, source_code: str = None, step_input_
function_name (str): The name of the Python function.
source_code (str): Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
step_input_mappings (list): List of Python function arguments.
- output_variable_mappings (list): List of Python function ouputs.
+ output_variable_mappings (list): List of Python function outputs.
step_dependencies (list): List of step names this step depends on.
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
cpu_size (str): Size of the CPU for the step function.
diff --git a/abacusai/prediction_client.py b/abacusai/prediction_client.py
index 1c51389df..225383a29 100644
--- a/abacusai/prediction_client.py
+++ b/abacusai/prediction_client.py
@@ -508,18 +508,19 @@ def get_assignments(self, deployment_token: str, deployment_id: str, query_data:
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'forcedAssignments': forced_assignments, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'includeAllAssignments': include_all_assignments}, server_override=prediction_url)
- def get_alternative_assignments(self, deployment_token: str, deployment_id: str, query_data: dict, add_constraints: list = None, solve_time_limit_seconds: float = None) -> Dict:
+ def get_alternative_assignments(self, deployment_token: str, deployment_id: str, query_data: dict, add_constraints: list = None, solve_time_limit_seconds: float = None, best_alternate_only: bool = False) -> Dict:
"""Get alternative positive assignments for given query. Optimal assignments are ignored and the alternative assignments are returned instead.
Args:
deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
deployment_id (str): The unique identifier of a deployment created under the project.
query_data (dict): Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
- add_constraints (list): List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
- solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query."""
+            add_constraints (list): List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in Assignment feature group to be used as coefficient for the assignment variables; optional, defaults to 1.
+ solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query.
+            best_alternate_only (bool): When True, only the best alternate will be returned; when False, multiple alternates are returned"""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds}, server_override=prediction_url)
+ return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'bestAlternateOnly': best_alternate_only}, server_override=prediction_url)
def check_constraints(self, deployment_token: str, deployment_id: str, query_data: dict) -> Dict:
"""Check for any constraints violated by the overrides.
@@ -689,7 +690,7 @@ def execute_conversation_agent(self, deployment_token: str, deployment_id: str,
deployment_id, deployment_token) if deployment_token else None
return self._call_api('executeConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos}, server_override=prediction_url)
- def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None) -> List[DocumentRetrieverLookupResult]:
+ def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None) -> List[DocumentRetrieverLookupResult]:
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
Original documents are splitted into chunks and stored in the document retriever. This lookup function will return the relevant chunks
@@ -709,12 +710,13 @@ def lookup_matches(self, deployment_token: str, deployment_id: str, data: str =
max_words_per_chunk (int): If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e, chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
score_multiplier_column (str): If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
min_score (float): If provided, will filter out the results with score less than the value specified.
+ required_phrases (list): If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace and case insensitive.
Returns:
list[DocumentRetrieverLookupResult]: The relevant documentation results found from the document retriever."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('lookupMatches', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'data': data, 'filters': filters, 'num': num, 'resultColumns': result_columns, 'maxWords': max_words, 'numRetrievalMarginWords': num_retrieval_margin_words, 'maxWordsPerChunk': max_words_per_chunk, 'scoreMultiplierColumn': score_multiplier_column, 'minScore': min_score}, parse_type=DocumentRetrieverLookupResult, server_override=prediction_url)
+ return self._call_api('lookupMatches', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'data': data, 'filters': filters, 'num': num, 'resultColumns': result_columns, 'maxWords': max_words, 'numRetrievalMarginWords': num_retrieval_margin_words, 'maxWordsPerChunk': max_words_per_chunk, 'scoreMultiplierColumn': score_multiplier_column, 'minScore': min_score, 'requiredPhrases': required_phrases}, parse_type=DocumentRetrieverLookupResult, server_override=prediction_url)
def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, blobs: None = None) -> Dict:
"""Executes a deployed AI agent function with binary data as inputs.
diff --git a/abacusai/project.py b/abacusai/project.py
index fe58d5e30..61180da61 100644
--- a/abacusai/project.py
+++ b/abacusai/project.py
@@ -2,7 +2,7 @@
from .api_class import (
AlertActionConfig, AlertConditionConfig, DocumentRetrieverConfig,
- ForecastingMonitorConfig, TrainingConfig
+ ForecastingMonitorConfig, TrainingConfig, WorkflowGraph
)
from .return_class import AbstractApiClass
@@ -604,7 +604,7 @@ def create_chat_session(self, name: str = None):
"""
return self.client.create_chat_session(self.project_id, name)
- def create_agent(self, function_source_code: str, agent_function_name: str, name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, evaluation_feature_group_id: str = None, agent_input_schema: dict = None, agent_output_schema: dict = None):
+ def create_agent(self, function_source_code: str = None, agent_function_name: str = None, name: str = None, memory: int = None, package_requirements: list = None, description: str = None, enable_binary_input: bool = False, evaluation_feature_group_id: str = None, agent_input_schema: dict = None, agent_output_schema: dict = None, workflow_graph: Union[dict, WorkflowGraph] = None):
"""
Creates a new AI agent.
@@ -619,11 +619,24 @@ def create_agent(self, function_source_code: str, agent_function_name: str, name
evaluation_feature_group_id (str): The ID of the feature group to use for evaluation.
agent_input_schema (dict): The schema of the input data for the agent, which conforms to the react-json-schema-form standard.
agent_output_schema (dict): The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
+ workflow_graph (WorkflowGraph): The workflow graph for the agent.
Returns:
Agent: The new agent
"""
- return self.client.create_agent(self.project_id, function_source_code, agent_function_name, name, memory, package_requirements, description, enable_binary_input, evaluation_feature_group_id, agent_input_schema, agent_output_schema)
+ return self.client.create_agent(self.project_id, function_source_code, agent_function_name, name, memory, package_requirements, description, enable_binary_input, evaluation_feature_group_id, agent_input_schema, agent_output_schema, workflow_graph)
+
+ def list_agents(self):
+ """
+ Retrieves the list of agents in the specified project.
+
+ Args:
+ project_id (str): The unique identifier associated with the project.
+
+ Returns:
+ list[Agent]: A list of agents in the project.
+ """
+ return self.client.list_agents(self.project_id)
def create_document_retriever(self, name: str, feature_group_id: str, document_retriever_config: Union[dict, DocumentRetrieverConfig] = None):
"""
diff --git a/abacusai/public.pem b/abacusai/public.pem
index 66b280492..10f75458a 100644
--- a/abacusai/public.pem
+++ b/abacusai/public.pem
@@ -1,5 +1,5 @@
-----BEGIN RSA PUBLIC KEY-----
-MIGJAoGBAJHbcVIwmETUUeR+tYt8xb015XMGV9eOhPsuyMCWjzdtCX1VZ83iPWPs
-JmygZpAqiV9ANGo562gFh8+xW10gJuQ698b+fuAuM8nMzxtKf3opbjBX1SMJxFms
-S0hU5MnMUJ4ZBVmbLB+EwbTECiiMBiu5EaOoZcr5918nw9xXjRP1AgMBAAE=
+MIGJAoGBAO/pYzoFUk4sCR4kE1OsBormZ5tQ7hl5t1Oq2FcmqLwrxUbjONldoBCs
+a9YyV8Q+nS39uuFa9noRrgfT90w+07jggLm2Q1psxOw79BkprDlcpXbM+abT9yjx
+ju6riUZLN+hSszrb7xV4bA+dGXyTMWMhsaGo4rDjsAMSbNzR56qlAgMBAAE=
-----END RSA PUBLIC KEY-----
diff --git a/abacusai/python_function_validator.py b/abacusai/python_function_validator.py
index eda43840d..05f449b70 100644
--- a/abacusai/python_function_validator.py
+++ b/abacusai/python_function_validator.py
@@ -6,8 +6,6 @@
from contextlib import contextmanager
from typing import Any, Dict, Generator, List, Optional
-from pandas import DataFrame
-
def validate_function_locally(client, python_function_name: str, kwargs: Dict = None) -> Any:
"""
@@ -25,6 +23,8 @@ def validate_function_locally(client, python_function_name: str, kwargs: Dict =
TypeError: If an Input Feature Group argument has an invalid type or argument is missing.
Exception: If an error occurs while validating the Python function.
"""
+ from pandas import DataFrame
+
kwargs = kwargs or {}
# Get the function metadata from the AbacusAI client.
function_metadata = client.describe_python_function(python_function_name)
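
The hunk above defers the pandas import from module scope into the function body, so importing `python_function_validator` no longer requires pandas to be installed. A minimal sketch of the same deferred-import pattern, under that assumption:

```python
def load_as_dataframe(rows):
    # Deferred import: pandas is only needed when the function is called,
    # so importing the surrounding module never fails on a missing dependency.
    from pandas import DataFrame
    return DataFrame(rows)
```
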
diff --git a/docs/_sources/autoapi/abacusai/agent/index.rst.txt b/docs/_sources/autoapi/abacusai/agent/index.rst.txt
index 571794444..308ba195a 100644
--- a/docs/_sources/autoapi/abacusai/agent/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/agent/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: Agent(client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={})
+.. py:class:: Agent(client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={}, workflowGraph={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -85,6 +85,19 @@ Classes
:rtype: Agent
+ .. py:method:: list_versions(limit = 100, start_after_version = None)
+
+ List all versions of an agent.
+
+ :param limit: If provided, limits the number of agent versions returned.
+ :type limit: int
+ :param start_after_version: Unique string identifier of the version after which the list starts.
+ :type start_after_version: str
+
+ :returns: An array of Agent versions.
+ :rtype: list[AgentVersion]
+
+
.. py:method:: wait_for_publish(timeout=None)
A waiting call until agent is published.
diff --git a/docs/_sources/autoapi/abacusai/agent_version/index.rst.txt b/docs/_sources/autoapi/abacusai/agent_version/index.rst.txt
index a8fe09c5c..b839e06ab 100644
--- a/docs/_sources/autoapi/abacusai/agent_version/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/agent_version/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: AgentVersion(client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={})
+.. py:class:: AgentVersion(client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={}, workflowGraph={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
diff --git a/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
index b54cc7fdc..d5aa8446b 100644
--- a/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
@@ -13,6 +13,11 @@ Classes
.. autoapisummary::
abacusai.api_class.ai_agents.FieldDescriptor
+ abacusai.api_class.ai_agents.WorkflowNodeInputMapping
+ abacusai.api_class.ai_agents.WorkflowNodeOutputMapping
+ abacusai.api_class.ai_agents.WorkflowGraphNode
+ abacusai.api_class.ai_agents.WorkflowGraphEdge
+ abacusai.api_class.ai_agents.WorkflowGraph
@@ -54,3 +59,182 @@ Classes
+.. py:class:: WorkflowNodeInputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of input to a workflow node.
+
+ :param name: The name of the input.
+ :type name: str
+ :param variable_type: The type of the input.
+ :type variable_type: str
+ :param workflow_variable_source: The workflow source stage of the input.
+ :type workflow_variable_source: str
+ :param is_required: Whether the input is required.
+ :type is_required: bool
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeInputType
+
+
+
+ .. py:attribute:: workflow_variable_source
+ :type: str
+
+
+
+ .. py:attribute:: is_required
+ :type: bool
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+
+.. py:class:: WorkflowNodeOutputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of output to a workflow node.
+
+ :param name: The name of the output.
+ :type name: str
+ :param variable_type: The type of the output.
+ :type variable_type: str
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeOutputType
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+
+.. py:class:: WorkflowGraphNode(name, input_mappings, output_mappings, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, package_requirements = None)
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A node in an Agent workflow graph.
+
+   :param name: Display name of the workflow node.
+ :type name: str
+ :param input_mappings: List of input mappings for the node.
+ :type input_mappings: List[WorkflowNodeInputMapping]
+ :param output_mappings: List of output mappings for the node.
+ :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param function: The callable node function reference if available.
+ :type function: callable
+ :param function_name: The name of the function if available.
+ :type function_name: str
+ :param source_code: The source code of the function if available.
+ :type source_code: str
+ :param input_schema: The react json schema for the input form if applicable.
+ :type input_schema: dict
+ :param output_schema: The react json schema for the output if applicable.
+ :type output_schema: dict
+ :param package_requirements: List of package requirements for the node.
+ :type package_requirements: list
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+.. py:class:: WorkflowGraphEdge
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An edge in an Agent workflow graph.
+
+ :param source: The source node of the edge.
+ :type source: str
+ :param target: The target node of the edge.
+ :type target: str
+ :param details: Additional details about the edge.
+ :type details: dict
+
+ .. py:attribute:: source
+ :type: str
+
+
+
+ .. py:attribute:: target
+ :type: str
+
+
+
+ .. py:attribute:: details
+ :type: dict
+
+
+
+ .. py:method:: to_nx_edge()
+
+
+
+.. py:class:: WorkflowGraph
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An Agent workflow graph.
+
+ :param nodes: A list of nodes in the workflow graph.
+ :type nodes: List[WorkflowGraphNode]
+ :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target and details.
+ :type edges: List[WorkflowGraphEdge]
+
+ .. py:attribute:: nodes
+ :type: List[WorkflowGraphNode]
+
+
+
+ .. py:attribute:: edges
+ :type: List[WorkflowGraphEdge]
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+ .. py:method:: from_dict(graph)
+ :classmethod:
+
+
+
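+   A minimal construction sketch based only on the signatures documented
+   above; the node function, names, and values are illustrative assumptions,
+   not SDK defaults:
+
+   .. code-block:: python
+
+      from abacusai.api_class import (
+          WorkflowGraph, WorkflowGraphNode,
+          WorkflowNodeInputMapping, WorkflowNodeOutputMapping,
+          WorkflowNodeInputType, WorkflowNodeOutputType,
+      )
+
+      def summarize(text: str) -> str:
+          # Illustrative node function; any callable can be referenced.
+          return text[:100]
+
+      summarize_node = WorkflowGraphNode(
+          name='Summarize',
+          input_mappings=[WorkflowNodeInputMapping(
+              name='text',
+              variable_type=WorkflowNodeInputType.USER_INPUT,
+              workflow_variable_source=None,   # user input has no upstream stage
+              is_required=True)],
+          output_mappings=[WorkflowNodeOutputMapping(
+              name='summary',
+              variable_type=WorkflowNodeOutputType.STRING)],
+          function=summarize,
+      )
+      graph = WorkflowGraph(
+          nodes=[summarize_node],
+          edges=[],  # multi-node graphs connect nodes with WorkflowGraphEdge
+      )
+      payload = graph.to_dict()  # camelCase keys, validated fields
+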
diff --git a/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
index f379d234a..eb367beb0 100644
--- a/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
@@ -75,7 +75,7 @@ Classes
:type item_attributes_to_include_in_the_result: list
:param explain_predictions: If True, calculates explanations for the forecasted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -217,7 +217,7 @@ Classes
:type output_columns: list
:param explain_predictions: If True, calculates explanations for the predicted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -411,7 +411,7 @@ Classes
:param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
:type for_eval: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
diff --git a/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
index 19483bf4e..49575a3be 100644
--- a/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
@@ -21,6 +21,7 @@ Classes
abacusai.api_class.dataset_application_connector.SharepointDatasetConfig
abacusai.api_class.dataset_application_connector.ZendeskDatasetConfig
abacusai.api_class.dataset_application_connector.AbacusUsageMetricsDatasetConfig
+ abacusai.api_class.dataset_application_connector.FreshserviceDatasetConfig
abacusai.api_class.dataset_application_connector._DatasetConfigFactory
@@ -312,6 +313,17 @@ Classes
+.. py:class:: FreshserviceDatasetConfig
+
+
+ Bases: :py:obj:`DatasetConfig`
+
+ Dataset config for Freshservice Application Connector
+
+ .. py:method:: __post_init__()
+
+
+
.. py:class:: _DatasetConfigFactory
diff --git a/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
index d2a3b7bcb..602af523b 100644
--- a/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
@@ -61,6 +61,8 @@ Classes
abacusai.api_class.enums.PythonFunctionType
abacusai.api_class.enums.EvalArtifactType
abacusai.api_class.enums.FieldDescriptorType
+ abacusai.api_class.enums.WorkflowNodeInputType
+ abacusai.api_class.enums.WorkflowNodeOutputType
abacusai.api_class.enums.OcrMode
abacusai.api_class.enums.StdDevThresholdType
abacusai.api_class.enums.DataType
@@ -1582,6 +1584,11 @@ Classes
+ .. py:attribute:: FRESHSERVICE
+ :value: 'FRESHSERVICE'
+
+
+
.. py:class:: PythonFunctionArgumentType
@@ -1852,6 +1859,11 @@ Classes
+ .. py:attribute:: GEMINI_1_5_PRO
+ :value: 'GEMINI_1_5_PRO'
+
+
+
.. py:attribute:: MIXTRAL_CHAT
:value: 'MIXTRAL_CHAT'
@@ -2128,6 +2140,66 @@ Classes
+.. py:class:: WorkflowNodeInputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: USER_INPUT
+ :value: 'USER_INPUT'
+
+
+
+ .. py:attribute:: WORKFLOW_VARIABLE
+ :value: 'WORKFLOW_VARIABLE'
+
+
+
+
+.. py:class:: WorkflowNodeOutputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: INTEGER
+ :value: 'INTEGER'
+
+
+
+ .. py:attribute:: STRING
+ :value: 'STRING'
+
+
+
+ .. py:attribute:: BOOLEAN
+ :value: 'BOOLEAN'
+
+
+
+ .. py:attribute:: FLOAT
+ :value: 'FLOAT'
+
+
+
+ .. py:attribute:: JSON
+ :value: 'JSON'
+
+
+
+ .. py:attribute:: LIST
+ :value: 'LIST'
+
+
+
+
.. py:class:: OcrMode
diff --git a/docs/_sources/autoapi/abacusai/api_class/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
index 4c60c06a2..2edc25123 100644
--- a/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
@@ -39,6 +39,11 @@ Classes
abacusai.api_class.ApiClass
abacusai.api_class.FieldDescriptor
+ abacusai.api_class.WorkflowNodeInputMapping
+ abacusai.api_class.WorkflowNodeOutputMapping
+ abacusai.api_class.WorkflowGraphNode
+ abacusai.api_class.WorkflowGraphEdge
+ abacusai.api_class.WorkflowGraph
abacusai.api_class.ApiClass
abacusai.api_class.HotkeyPrompt
abacusai.api_class.ApiClass
@@ -73,6 +78,7 @@ Classes
abacusai.api_class.SharepointDatasetConfig
abacusai.api_class.ZendeskDatasetConfig
abacusai.api_class.AbacusUsageMetricsDatasetConfig
+ abacusai.api_class.FreshserviceDatasetConfig
abacusai.api_class._DatasetConfigFactory
abacusai.api_class.ApiClass
abacusai.api_class._ApiClassFactory
@@ -139,6 +145,8 @@ Classes
abacusai.api_class.PythonFunctionType
abacusai.api_class.EvalArtifactType
abacusai.api_class.FieldDescriptorType
+ abacusai.api_class.WorkflowNodeInputType
+ abacusai.api_class.WorkflowNodeOutputType
abacusai.api_class.OcrMode
abacusai.api_class.StdDevThresholdType
abacusai.api_class.DataType
@@ -184,6 +192,8 @@ Classes
abacusai.api_class.ForecastingMonitorConfig
abacusai.api_class.StdDevThreshold
abacusai.api_class.ItemAttributesStdDevThreshold
+ abacusai.api_class.RestrictFeatureMappings
+ abacusai.api_class.MonitorFilteringConfig
abacusai.api_class.ApiClass
abacusai.api_class._ApiClassFactory
abacusai.api_class.AlertConditionConfig
@@ -312,6 +322,185 @@ Classes
+.. py:class:: WorkflowNodeInputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of input to a workflow node.
+
+ :param name: The name of the input.
+ :type name: str
+ :param variable_type: The type of the input.
+ :type variable_type: str
+ :param workflow_variable_source: The workflow source stage of the input.
+ :type workflow_variable_source: str
+ :param is_required: Whether the input is required.
+ :type is_required: bool
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeInputType
+
+
+
+ .. py:attribute:: workflow_variable_source
+ :type: str
+
+
+
+ .. py:attribute:: is_required
+ :type: bool
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: WorkflowNodeOutputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of output to a workflow node.
+
+ :param name: The name of the output.
+ :type name: str
+ :param variable_type: The type of the output.
+ :type variable_type: str
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeOutputType
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: WorkflowGraphNode(name, input_mappings, output_mappings, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, package_requirements = None)
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A node in an Agent workflow graph.
+
+   :param name: Display name of the workflow node.
+ :type name: str
+ :param input_mappings: List of input mappings for the node.
+ :type input_mappings: List[WorkflowNodeInputMapping]
+ :param output_mappings: List of output mappings for the node.
+ :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param function: The callable node function reference if available.
+ :type function: callable
+ :param function_name: The name of the function if available.
+ :type function_name: str
+ :param source_code: The source code of the function if available.
+ :type source_code: str
+   :param input_schema: The React JSON schema for the input form, if applicable.
+   :type input_schema: dict
+   :param output_schema: The React JSON schema for the output, if applicable.
+   :type output_schema: dict
+ :param package_requirements: List of package requirements for the node.
+ :type package_requirements: list
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+.. py:class:: WorkflowGraphEdge
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An edge in an Agent workflow graph.
+
+ :param source: The source node of the edge.
+ :type source: str
+ :param target: The target node of the edge.
+ :type target: str
+ :param details: Additional details about the edge.
+ :type details: dict
+
+ .. py:attribute:: source
+ :type: str
+
+
+
+ .. py:attribute:: target
+ :type: str
+
+
+
+ .. py:attribute:: details
+ :type: dict
+
+
+
+ .. py:method:: to_nx_edge()
+
+
+
+.. py:class:: WorkflowGraph
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An Agent workflow graph.
+
+ :param nodes: A list of nodes in the workflow graph.
+ :type nodes: List[WorkflowGraphNode]
+ :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target and details.
+ :type edges: List[WorkflowGraphEdge]
+
+ .. py:attribute:: nodes
+ :type: List[WorkflowGraphNode]
+
+
+
+ .. py:attribute:: edges
+ :type: List[WorkflowGraphEdge]
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+ .. py:method:: from_dict(graph)
+ :classmethod:
+
+
+
.. py:class:: ApiClass
@@ -538,7 +727,7 @@ Classes
:type item_attributes_to_include_in_the_result: list
:param explain_predictions: If True, calculates explanations for the forecasted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -680,7 +869,7 @@ Classes
:type output_columns: list
:param explain_predictions: If True, calculates explanations for the predicted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -874,7 +1063,7 @@ Classes
:param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
:type for_eval: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -1618,6 +1807,17 @@ Classes
+.. py:class:: FreshserviceDatasetConfig
+
+
+ Bases: :py:obj:`DatasetConfig`
+
+ Dataset config for Freshservice Application Connector
+
+ .. py:method:: __post_init__()
+
+
+
.. py:class:: _DatasetConfigFactory
@@ -3750,6 +3950,11 @@ Classes
+ .. py:attribute:: FRESHSERVICE
+ :value: 'FRESHSERVICE'
+
+
+
.. py:class:: PythonFunctionArgumentType
@@ -4020,6 +4225,11 @@ Classes
+ .. py:attribute:: GEMINI_1_5_PRO
+ :value: 'GEMINI_1_5_PRO'
+
+
+
.. py:attribute:: MIXTRAL_CHAT
:value: 'MIXTRAL_CHAT'
@@ -4296,6 +4506,66 @@ Classes
+.. py:class:: WorkflowNodeInputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: USER_INPUT
+ :value: 'USER_INPUT'
+
+
+
+ .. py:attribute:: WORKFLOW_VARIABLE
+ :value: 'WORKFLOW_VARIABLE'
+
+
+
+
+.. py:class:: WorkflowNodeOutputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: INTEGER
+ :value: 'INTEGER'
+
+
+
+ .. py:attribute:: STRING
+ :value: 'STRING'
+
+
+
+ .. py:attribute:: BOOLEAN
+ :value: 'BOOLEAN'
+
+
+
+ .. py:attribute:: FLOAT
+ :value: 'FLOAT'
+
+
+
+ .. py:attribute:: JSON
+ :value: 'JSON'
+
+
+
+ .. py:attribute:: LIST
+ :value: 'LIST'
+
+
+
+
.. py:class:: OcrMode
@@ -5865,8 +6135,6 @@ Classes
:type active_labels_column: str
:param document_format: Format of the input documents.
:type document_format: NLPDocumentFormat
- :param include_longformer: Whether to include the longformer model.
- :type include_longformer: bool
:param minimum_bounding_box_overlap_ratio: Tokens are considered to belong to annotation if the user bounding box is provided and ratio of (token_bounding_box ∩ annotation_bounding_box) / token_bounding_area is greater than the provided value.
:type minimum_bounding_box_overlap_ratio: float
:param save_predicted_pdf: Whether to save predicted PDF documents
@@ -5948,8 +6216,6 @@ Classes
:type search_chunk_size: int
:param chunk_overlap_fraction: Overlap in chunks while indexing the documents.
:type chunk_overlap_fraction: float
- :param test_split: Percent of dataset to use for test data. We support using a range between 5 ( i.e. 5% ) to 20 ( i.e. 20% ) of your dataset.
- :type test_split: int
:param index_fraction: Fraction of the chunk to use for indexing.
:type index_fraction: float
@@ -6615,52 +6881,10 @@ Classes
Training config for the CUSTOM_ALGORITHM problem type
- :param train_function_name: The name of the train function.
- :type train_function_name: str
- :param predict_many_function_name: The name of the predict many function.
- :type predict_many_function_name: str
- :param training_input_tables: List of tables to use for training.
- :type training_input_tables: List[str]
- :param predict_function_name: Optional name of the predict function if the predict many function is not given.
- :type predict_function_name: str
- :param train_module_name: The name of the train module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type train_module_name: str
- :param predict_module_name: The name of the predict module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type predict_module_name: str
- :param test_split: Percent of dataset to use for test data. We support using a range between 6% to 20% of your dataset to use as test data.
- :type test_split: int
-
- .. py:attribute:: train_function_name
- :type: str
-
-
-
- .. py:attribute:: predict_many_function_name
- :type: str
-
-
-
- .. py:attribute:: training_input_tables
- :type: List[str]
-
-
-
- .. py:attribute:: predict_function_name
- :type: str
+ :param timeout_minutes: Timeout for the model training in minutes.
+ :type timeout_minutes: int
-
-
- .. py:attribute:: train_module_name
- :type: str
-
-
-
- .. py:attribute:: predict_module_name
- :type: str
-
-
-
- .. py:attribute:: test_split
+ .. py:attribute:: timeout_minutes
:type: int
@@ -6987,6 +7211,80 @@ Classes
+.. py:class:: RestrictFeatureMappings
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Restrict Feature Mappings for Monitor Filtering
+
+ :param feature_name: The name of the feature to restrict the monitor to.
+ :type feature_name: str
+ :param restricted_feature_values: The values of the feature to restrict the monitor to.
+ :type restricted_feature_values: list
+
+ .. py:attribute:: feature_name
+ :type: str
+
+
+
+ .. py:attribute:: restricted_feature_values
+ :type: list
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: MonitorFilteringConfig
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Monitor Filtering Configuration
+
+   :param start_time: The start time of the prediction time column.
+   :type start_time: str
+   :param end_time: The end time of the prediction time column.
+   :type end_time: str
+   :param restrict_feature_mappings: The feature mappings to restrict the monitor to.
+   :type restrict_feature_mappings: List[RestrictFeatureMappings]
+ :param target_class: The target class to restrict the monitor to.
+ :type target_class: str
+
+ .. py:attribute:: start_time
+ :type: str
+
+
+
+ .. py:attribute:: end_time
+ :type: str
+
+
+
+ .. py:attribute:: restrict_feature_mappings
+ :type: List[RestrictFeatureMappings]
+
+
+
+ .. py:attribute:: target_class
+ :type: str
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
.. py:class:: ApiClass
diff --git a/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
index a00149a52..80c46612e 100644
--- a/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
@@ -1112,8 +1112,6 @@ Classes
:type active_labels_column: str
:param document_format: Format of the input documents.
:type document_format: NLPDocumentFormat
- :param include_longformer: Whether to include the longformer model.
- :type include_longformer: bool
:param minimum_bounding_box_overlap_ratio: Tokens are considered to belong to annotation if the user bounding box is provided and ratio of (token_bounding_box ∩ annotation_bounding_box) / token_bounding_area is greater than the provided value.
:type minimum_bounding_box_overlap_ratio: float
:param save_predicted_pdf: Whether to save predicted PDF documents
@@ -1195,8 +1193,6 @@ Classes
:type search_chunk_size: int
:param chunk_overlap_fraction: Overlap in chunks while indexing the documents.
:type chunk_overlap_fraction: float
- :param test_split: Percent of dataset to use for test data. We support using a range between 5 ( i.e. 5% ) to 20 ( i.e. 20% ) of your dataset.
- :type test_split: int
:param index_fraction: Fraction of the chunk to use for indexing.
:type index_fraction: float
@@ -1862,52 +1858,10 @@ Classes
Training config for the CUSTOM_ALGORITHM problem type
- :param train_function_name: The name of the train function.
- :type train_function_name: str
- :param predict_many_function_name: The name of the predict many function.
- :type predict_many_function_name: str
- :param training_input_tables: List of tables to use for training.
- :type training_input_tables: List[str]
- :param predict_function_name: Optional name of the predict function if the predict many function is not given.
- :type predict_function_name: str
- :param train_module_name: The name of the train module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type train_module_name: str
- :param predict_module_name: The name of the predict module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type predict_module_name: str
- :param test_split: Percent of dataset to use for test data. We support using a range between 6% to 20% of your dataset to use as test data.
- :type test_split: int
-
- .. py:attribute:: train_function_name
- :type: str
-
-
+ :param timeout_minutes: Timeout for the model training in minutes.
+ :type timeout_minutes: int
- .. py:attribute:: predict_many_function_name
- :type: str
-
-
-
- .. py:attribute:: training_input_tables
- :type: List[str]
-
-
-
- .. py:attribute:: predict_function_name
- :type: str
-
-
-
- .. py:attribute:: train_module_name
- :type: str
-
-
-
- .. py:attribute:: predict_module_name
- :type: str
-
-
-
- .. py:attribute:: test_split
+ .. py:attribute:: timeout_minutes
:type: int
diff --git a/docs/_sources/autoapi/abacusai/api_class/monitor/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/monitor/index.rst.txt
index 89e9d5a6d..99474c56d 100644
--- a/docs/_sources/autoapi/abacusai/api_class/monitor/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/monitor/index.rst.txt
@@ -16,6 +16,8 @@ Classes
abacusai.api_class.monitor.ForecastingMonitorConfig
abacusai.api_class.monitor.StdDevThreshold
abacusai.api_class.monitor.ItemAttributesStdDevThreshold
+ abacusai.api_class.monitor.RestrictFeatureMappings
+ abacusai.api_class.monitor.MonitorFilteringConfig
@@ -168,3 +170,77 @@ Classes
+.. py:class:: RestrictFeatureMappings
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Restrict Feature Mappings for Monitor Filtering
+
+ :param feature_name: The name of the feature to restrict the monitor to.
+ :type feature_name: str
+ :param restricted_feature_values: The values of the feature to restrict the monitor to.
+ :type restricted_feature_values: list
+
+ .. py:attribute:: feature_name
+ :type: str
+
+
+
+ .. py:attribute:: restricted_feature_values
+ :type: list
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: MonitorFilteringConfig
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Monitor Filtering Configuration
+
+   :param start_time: The start time of the prediction time column.
+   :type start_time: str
+   :param end_time: The end time of the prediction time column.
+   :type end_time: str
+   :param restrict_feature_mappings: The feature mappings to restrict the monitor to.
+   :type restrict_feature_mappings: List[RestrictFeatureMappings]
+ :param target_class: The target class to restrict the monitor to.
+ :type target_class: str
+
+ .. py:attribute:: start_time
+ :type: str
+
+
+
+ .. py:attribute:: end_time
+ :type: str
+
+
+
+ .. py:attribute:: restrict_feature_mappings
+ :type: List[RestrictFeatureMappings]
+
+
+
+ .. py:attribute:: target_class
+ :type: str
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
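+   A construction sketch; the feature names, values, and timestamps below
+   are illustrative assumptions, not SDK defaults:
+
+   .. code-block:: python
+
+      from abacusai.api_class import (
+          MonitorFilteringConfig, RestrictFeatureMappings,
+      )
+
+      config = MonitorFilteringConfig(
+          start_time='2024-01-01 00:00:00',  # bounds on the prediction time column
+          end_time='2024-03-31 23:59:59',
+          restrict_feature_mappings=[RestrictFeatureMappings(
+              feature_name='state',          # monitor only these feature values
+              restricted_feature_values=['CA', 'NY'])],
+          target_class='fraud',
+      )
+      payload = config.to_dict()  # camelCase keys, validated fields
+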
diff --git a/docs/_sources/autoapi/abacusai/client/index.rst.txt b/docs/_sources/autoapi/abacusai/client/index.rst.txt
index db59bfae6..a541d98e9 100644
--- a/docs/_sources/autoapi/abacusai/client/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/client/index.rst.txt
@@ -487,7 +487,7 @@ Attributes
:type skip_version_check: bool
.. py:attribute:: client_version
- :value: '1.2.1'
+ :value: '1.2.2'
@@ -2074,17 +2074,6 @@ Attributes
:rtype: list[FeatureGroupRow]
- .. py:method:: list_pending_feature_group_documents(feature_group_id)
-
- Lists all pending documents added to feature group.
-
- :param feature_group_id: The unique ID associated with the feature group.
- :type feature_group_id: str
-
- :returns: A list of pending feature group documents.
- :rtype: list[FeatureGroupDocument]
-
-
.. py:method:: describe_python_function(name)
Describe a Python Function.
@@ -2527,6 +2516,32 @@ Attributes
:rtype: list[OrganizationSearchResult]
+ .. py:method:: list_agents(project_id)
+
+ Retrieves the list of agents in the specified project.
+
+ :param project_id: The unique identifier associated with the project.
+ :type project_id: str
+
+ :returns: A list of agents in the project.
+ :rtype: list[Agent]
+
+
+ .. py:method:: list_agent_versions(agent_id, limit = 100, start_after_version = None)
+
+ List all versions of an agent.
+
+ :param agent_id: The unique identifier associated with the agent.
+ :type agent_id: str
+ :param limit: If provided, limits the number of agent versions returned.
+ :type limit: int
+ :param start_after_version: Unique string identifier of the version after which the list starts.
+ :type start_after_version: str
+
+ :returns: An array of Agent versions.
+ :rtype: list[AgentVersion]
+
+
.. py:method:: list_document_retrievers(project_id, limit = 100, start_after_id = None)
List all the document retrievers.
@@ -3414,7 +3429,7 @@ Attributes
.. py:method:: _get_doc_retriever_deployment_info(document_retriever_id)
- .. py:method:: get_matching_documents(document_retriever_id, query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: get_matching_documents(document_retriever_id, query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
@@ -3443,6 +3458,8 @@ Attributes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score lower than the value specified.
:type min_score: float
+      :param required_phrases: If provided, each result will contain at least one of the given phrases.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
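+
+      For example (a sketch assuming an authenticated ``client``; the
+      retriever ID and query are placeholders):
+
+      .. code-block:: python
+
+         results = client.get_matching_documents(
+             document_retriever_id='doc_retriever_id',  # placeholder
+             query='how do I rotate an API key?',
+             limit=5,
+             min_score=0.5,                   # drop low-relevance results
+             required_phrases=['API key'],    # each result must contain one phrase
+         )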
@@ -3951,7 +3968,7 @@ Attributes
:type use_original_csv_names: bool
:param python_function_name: Name of Python Function that contains the source code and function arguments.
:type python_function_name: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of python function arguments.
:type python_function_bindings: list
:param use_gpu: Whether the feature group needs a gpu or not. Otherwise default to CPU.
:type use_gpu: bool
@@ -4620,11 +4637,11 @@ Attributes
:param feature_group_id: The unique ID associated with the feature group.
:type feature_group_id: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of python function arguments.
:type python_function_bindings: list
- .. py:method:: update_feature_group_python_function(feature_group_id, python_function_name, python_function_bindings = [])
+ .. py:method:: update_feature_group_python_function(feature_group_id, python_function_name, python_function_bindings = None, cpu_size = None, memory = None, use_gpu = None)
Updates an existing Feature Group's python function from a user provided Python Function. If a list of feature groups are supplied within the python function
@@ -4636,8 +4653,14 @@ Attributes
:type feature_group_id: str
:param python_function_name: The name of the python function to be associated with the feature group.
:type python_function_name: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of python function arguments.
:type python_function_bindings: list
+ :param cpu_size: Size of the CPU for the feature group python function.
+ :type cpu_size: str
+ :param memory: Memory (in GB) for the feature group python function.
+ :type memory: int
+      :param use_gpu: Whether the feature group needs a GPU; defaults to CPU otherwise.
+ :type use_gpu: bool
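+
+      A sketch of a call using the new resource parameters (the ID, function
+      name, and sizes are placeholder assumptions):
+
+      .. code-block:: python
+
+         client.update_feature_group_python_function(
+             feature_group_id='fg_id',        # placeholder
+             python_function_name='transform_sales',
+             python_function_bindings=None,   # assumed to leave bindings unchanged
+             cpu_size='MEDIUM',               # assumed size label
+             memory=16,                       # GB
+             use_gpu=False,
+         )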
.. py:method:: update_feature_group_sql_definition(feature_group_id, sql)
@@ -4686,7 +4709,7 @@ Attributes
:type package_requirements: list
:param use_original_csv_names: If set to `True`, feature group uses the original column names for input feature groups from CSV datasets.
:type use_original_csv_names: bool
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of PythonFunctionArgument objects that represent the bindings for the Python function.
:type python_function_bindings: list
:param use_gpu: Whether the feature group needs a gpu or not. Otherwise default to CPU.
:type use_gpu: bool
@@ -5502,7 +5525,7 @@ Attributes
:type name: str
- .. py:method:: update_python_model(model_id, function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None)
+ .. py:method:: update_python_model(model_id, function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None, training_config = None)
Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.
@@ -5533,6 +5556,8 @@ Attributes
:type use_gpu: bool
:param is_thread_safe: Whether this model is thread safe
:type is_thread_safe: bool
+ :param training_config: The training config used to train this model.
+ :type training_config: TrainingConfig
:returns: The updated model.
:rtype: Model
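+
+      For instance, to set the new custom-algorithm timeout (a sketch; it
+      assumes the TrainingConfig subclass for CUSTOM_ALGORITHM is exposed as
+      ``CustomAlgorithmTrainingConfig``, and the model ID is a placeholder):
+
+      .. code-block:: python
+
+         from abacusai.api_class import CustomAlgorithmTrainingConfig  # assumed name
+
+         model = client.update_python_model(
+             model_id='model_id',  # placeholder
+             training_config=CustomAlgorithmTrainingConfig(timeout_minutes=120),
+         )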
@@ -7132,7 +7157,7 @@ Attributes
:type include_all_assignments: bool
- .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None)
+ .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)
Get alternative positive assignments for given query. Optimal assignments are ignored and the alternative assignments are returned instead.
@@ -7142,10 +7167,12 @@ Attributes
:type deployment_id: str
:param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
:type query_data: dict
- :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
+ :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in Assignment feature group to be used as coefficient for the assignment variables, optional and defaults to 1
:type add_constraints: list
:param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
:type solve_time_limit_seconds: float
+      :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
+ :type best_alternate_only: bool
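+
+      A sketch of the constraint format described above (the token, ID, and
+      column names are placeholders):
+
+      .. code-block:: python
+
+         alternates = client.get_alternative_assignments(
+             deployment_token='token',        # placeholder
+             deployment_id='deployment_id',   # placeholder
+             query_data={'driver_id': ['d1', 'd2']},
+             add_constraints=[{
+                 'query': {'route': 'R7'},      # assignment variables involved
+                 'operator': '<=',
+                 'constant': 3,
+                 'coefficient_column': 'load',  # optional; defaults to 1
+             }],
+             solve_time_limit_seconds=10.0,
+             best_alternate_only=True,
+         )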
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
@@ -7344,7 +7371,7 @@ Attributes
:type doc_infos: list
- .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
@@ -7375,6 +7402,8 @@ Attributes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score less than the value specified.
:type min_score: float
+      :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. Matching is whitespace- and case-insensitive.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
@@ -7677,19 +7706,6 @@ Attributes
:type primary_key: str
- .. py:method:: add_feature_group_document(feature_group_id, document)
-
- Adds a document to the feature group.
-
- :param feature_group_id: The unique ID associated with the feature group.
- :type feature_group_id: str
- :param document: The multipart/form-data of the document to add to the feature group.
- :type document: io.TextIOBase
-
- :returns: The feature group document that was added.
- :rtype: FeatureGroupDocument
-
-
.. py:method:: describe_feature_group_row_process_by_key(deployment_id, primary_key_value)
Gets the feature group row process.
@@ -7937,7 +7953,7 @@ Attributes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
@@ -7974,7 +7990,7 @@ Attributes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
@@ -8661,7 +8677,7 @@ Attributes
:type external_application_id: str
- .. py:method:: create_agent(project_id, function_source_code, agent_function_name, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: create_agent(project_id, function_source_code = None, agent_function_name = None, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Creates a new AI agent.
@@ -8687,12 +8703,14 @@ Attributes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The new agent
:rtype: Agent
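+
+      With ``function_source_code`` now optional, an agent can plausibly be
+      created from a graph alone (a sketch; ``graph`` is a ``WorkflowGraph``
+      like the one built in the api_class docs, and the project ID is a
+      placeholder):
+
+      .. code-block:: python
+
+         agent = client.create_agent(
+             project_id='project_id',   # placeholder
+             name='Summarizer Agent',
+             workflow_graph=graph,      # WorkflowGraph instance
+         )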
- .. py:method:: update_agent(model_id, function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: update_agent(model_id, function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Updates an existing AI Agent using user-provided Python code. A new version of the agent will be created and published.
@@ -8714,6 +8732,8 @@ Attributes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The updated agent
:rtype: Agent
diff --git a/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt b/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
index 7d0bfe37b..88cf6c40d 100644
--- a/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, chatType=None, agentResponse=None, error=None, segments=None)
+.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeKey=None, chatType=None, agentResponse=None, error=None, segments=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -56,6 +56,8 @@ Classes
:type attachments: list
:param responseVersion: The version of the response, used to differentiate w/ legacy agent response.
:type responseVersion: str
+ :param agentWorkflowNodeKey: The workflow node key associated with the agent response.
+ :type agentWorkflowNodeKey: str
:param chatType: The type of chat llm that was run for the message.
:type chatType: str
:param agentResponse: Response from the agent. Only for conversation with agents.
diff --git a/docs/_sources/autoapi/abacusai/document_retriever/index.rst.txt b/docs/_sources/autoapi/abacusai/document_retriever/index.rst.txt
index 2badfdf8b..ab17d2950 100644
--- a/docs/_sources/autoapi/abacusai/document_retriever/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/document_retriever/index.rst.txt
@@ -167,11 +167,11 @@ Classes
:rtype: str
- .. py:method:: get_matching_documents(query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None)
+ .. py:method:: get_matching_documents(query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
- Original documents are splitted into chunks and stored in the document retriever. This lookup function will return the relevant chunks
+ Original documents are split into chunks and stored in the document retriever. This lookup function will return the relevant chunks
from the document retriever. The returned chunks could be expanded to include more words from the original documents and merged if they
are overlapping, and permitted by the settings provided. The returned chunks are sorted by relevance.
@@ -192,6 +192,10 @@ Classes
:type max_words_per_chunk: int
:param score_multiplier_column: If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
:type score_multiplier_column: str
+ :param min_score: If provided, will filter out the results with score lower than the value specified.
+ :type min_score: float
+      :param required_phrases: If provided, each result will contain at least one of the given phrases.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
diff --git a/docs/_sources/autoapi/abacusai/drift_distributions/index.rst.txt b/docs/_sources/autoapi/abacusai/drift_distributions/index.rst.txt
index b5d477ada..b9ad7dcbf 100644
--- a/docs/_sources/autoapi/abacusai/drift_distributions/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/drift_distributions/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: DriftDistributions(client, labelDrift={}, predictionDrift={})
+.. py:class:: DriftDistributions(client, labelDrift={}, predictionDrift={}, bpPredictionDrift={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -30,6 +30,8 @@ Classes
:type labelDrift: DriftDistribution
:param predictionDrift: A DriftDistribution describing column names and the range of values for prediction drift.
:type predictionDrift: DriftDistribution
+   :param bpPredictionDrift: A DriftDistribution describing column names and the range of values for prediction drift when the predictions come from a batch prediction (BP).
+ :type bpPredictionDrift: DriftDistribution
.. py:method:: __repr__()
diff --git a/docs/_sources/autoapi/abacusai/feature_group/index.rst.txt b/docs/_sources/autoapi/abacusai/feature_group/index.rst.txt
index 4de91922f..e14f94859 100644
--- a/docs/_sources/autoapi/abacusai/feature_group/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/feature_group/index.rst.txt
@@ -810,11 +810,11 @@ Classes
Updates an existing Feature Group's Python function bindings from a user-provided Python Function. If a list of feature groups are supplied within the Python function bindings, we will provide DataFrames (Pandas in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of python function arguments.
:type python_function_bindings: list
- .. py:method:: update_python_function(python_function_name, python_function_bindings = [])
+ .. py:method:: update_python_function(python_function_name, python_function_bindings = None, cpu_size = None, memory = None, use_gpu = None)
Updates an existing Feature Group's python function from a user provided Python Function. If a list of feature groups are supplied within the python function
@@ -824,8 +824,14 @@ Classes
:param python_function_name: The name of the python function to be associated with the feature group.
:type python_function_name: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of python function arguments.
:type python_function_bindings: list
+ :param cpu_size: Size of the CPU for the feature group python function.
+ :type cpu_size: str
+ :param memory: Memory (in GB) for the feature group python function.
+ :type memory: int
+      :param use_gpu: Whether the feature group needs a GPU; defaults to CPU otherwise.
+ :type use_gpu: bool
.. py:method:: update_sql_definition(sql)
@@ -868,7 +874,7 @@ Classes
:type package_requirements: list
:param use_original_csv_names: If set to `True`, feature group uses the original column names for input feature groups from CSV datasets.
:type use_original_csv_names: bool
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of PythonFunctionArgument objects that represent the bindings for the Python function.
:type python_function_bindings: list
:param use_gpu: Whether the feature group needs a gpu or not. Otherwise default to CPU.
:type use_gpu: bool
@@ -1105,28 +1111,6 @@ Classes
:rtype: list[FeatureGroupRow]
- .. py:method:: add_document(document)
-
- Adds a document to the feature group.
-
- :param document: The multipart/form-data of the document to add to the feature group.
- :type document: io.TextIOBase
-
- :returns: The feature group document that was added.
- :rtype: FeatureGroupDocument
-
-
- .. py:method:: list_pending_documents()
-
- Lists all pending documents added to feature group.
-
- :param feature_group_id: The unique ID associated with the feature group.
- :type feature_group_id: str
-
- :returns: A list of pending feature group documents.
- :rtype: list[FeatureGroupDocument]
-
-
.. py:method:: get_natural_language_explanation(feature_group_version = None, model_id = None)
Returns the saved natural language explanation of an artifact with given ID. The artifact can be - Feature Group or Feature Group Version or Model
diff --git a/docs/_sources/autoapi/abacusai/feature_performance_analysis/index.rst.txt b/docs/_sources/autoapi/abacusai/feature_performance_analysis/index.rst.txt
new file mode 100644
index 000000000..bd3cbe220
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/feature_performance_analysis/index.rst.txt
@@ -0,0 +1,49 @@
+:py:mod:`abacusai.feature_performance_analysis`
+===============================================
+
+.. py:module:: abacusai.feature_performance_analysis
+
+
+Module Contents
+---------------
+
+Classes
+~~~~~~~
+
+.. autoapisummary::
+
+ abacusai.feature_performance_analysis.FeaturePerformanceAnalysis
+
+
+
+
+.. py:class:: FeaturePerformanceAnalysis(client, features=None, featureMetrics=None, metricsKeys=None)
+
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+   A feature performance analysis for a monitor.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param features: A list of the features that are being analyzed.
+ :type features: list
+   :param featureMetrics: A list of dictionaries, one per feature, containing its metrics.
+ :type featureMetrics: list
+ :param metricsKeys: A list of the keys for the metrics.
+ :type metricsKeys: list
+
+ .. py:method:: __repr__()
+
+ Return repr(self).
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/index.rst.txt b/docs/_sources/autoapi/abacusai/index.rst.txt
index 3d46501f6..cd5393a49 100644
--- a/docs/_sources/autoapi/abacusai/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/index.rst.txt
@@ -113,6 +113,7 @@ Submodules
feature_group_version/index.rst
feature_importance/index.rst
feature_mapping/index.rst
+ feature_performance_analysis/index.rst
feature_record/index.rst
file_connector/index.rst
file_connector_instructions/index.rst
@@ -234,6 +235,11 @@ Classes
abacusai.ApiClass
abacusai.FieldDescriptor
+ abacusai.WorkflowNodeInputMapping
+ abacusai.WorkflowNodeOutputMapping
+ abacusai.WorkflowGraphNode
+ abacusai.WorkflowGraphEdge
+ abacusai.WorkflowGraph
abacusai.HotkeyPrompt
abacusai._ApiClassFactory
abacusai.BatchPredictionArgs
@@ -262,6 +268,7 @@ Classes
abacusai.SharepointDatasetConfig
abacusai.ZendeskDatasetConfig
abacusai.AbacusUsageMetricsDatasetConfig
+ abacusai.FreshserviceDatasetConfig
abacusai._DatasetConfigFactory
abacusai.PredictionArguments
abacusai.OptimizationPredictionArguments
@@ -324,6 +331,8 @@ Classes
abacusai.PythonFunctionType
abacusai.EvalArtifactType
abacusai.FieldDescriptorType
+ abacusai.WorkflowNodeInputType
+ abacusai.WorkflowNodeOutputType
abacusai.StdDevThresholdType
abacusai.DataType
abacusai.SamplingConfig
@@ -362,6 +371,8 @@ Classes
abacusai.ForecastingMonitorConfig
abacusai.StdDevThreshold
abacusai.ItemAttributesStdDevThreshold
+ abacusai.RestrictFeatureMappings
+ abacusai.MonitorFilteringConfig
abacusai.AlertConditionConfig
abacusai.AccuracyBelowThresholdConditionConfig
abacusai.FeatureDriftConditionConfig
@@ -499,6 +510,185 @@ Attributes
+.. py:class:: WorkflowNodeInputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of input to a workflow node.
+
+ :param name: The name of the input.
+ :type name: str
+ :param variable_type: The type of the input.
+ :type variable_type: str
+ :param workflow_variable_source: The workflow source stage of the input.
+ :type workflow_variable_source: str
+ :param is_required: Whether the input is required.
+ :type is_required: bool
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeInputType
+
+
+
+ .. py:attribute:: workflow_variable_source
+ :type: str
+
+
+
+ .. py:attribute:: is_required
+ :type: bool
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: WorkflowNodeOutputMapping
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A mapping of output to a workflow node.
+
+ :param name: The name of the output.
+ :type name: str
+ :param variable_type: The type of the output.
+ :type variable_type: str
+
+ .. py:attribute:: name
+ :type: str
+
+
+
+ .. py:attribute:: variable_type
+ :type: abacusai.api_class.enums.WorkflowNodeOutputType
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+
+.. py:class:: WorkflowGraphNode(name, input_mappings, output_mappings, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, package_requirements = None)
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ A node in an Agent workflow graph.
+
+   :param name: Display name of the workflow node.
+ :type name: str
+ :param input_mappings: List of input mappings for the node.
+ :type input_mappings: List[WorkflowNodeInputMapping]
+ :param output_mappings: List of output mappings for the node.
+ :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param function: The callable node function reference if available.
+ :type function: callable
+ :param function_name: The name of the function if available.
+ :type function_name: str
+ :param source_code: The source code of the function if available.
+ :type source_code: str
+   :param input_schema: The React JSON schema for the input form, if applicable.
+   :type input_schema: dict
+   :param output_schema: The React JSON schema for the output, if applicable.
+   :type output_schema: dict
+ :param package_requirements: List of package requirements for the node.
+ :type package_requirements: list
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+.. py:class:: WorkflowGraphEdge
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An edge in an Agent workflow graph.
+
+ :param source: The source node of the edge.
+ :type source: str
+ :param target: The target node of the edge.
+ :type target: str
+ :param details: Additional details about the edge.
+ :type details: dict
+
+ .. py:attribute:: source
+ :type: str
+
+
+
+ .. py:attribute:: target
+ :type: str
+
+
+
+ .. py:attribute:: details
+ :type: dict
+
+
+
+ .. py:method:: to_nx_edge()
+
+
+
+.. py:class:: WorkflowGraph
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ An Agent workflow graph.
+
+ :param nodes: A list of nodes in the workflow graph.
+ :type nodes: List[WorkflowGraphNode]
+ :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target and details.
+ :type edges: List[WorkflowGraphEdge]
+
+ .. py:attribute:: nodes
+ :type: List[WorkflowGraphNode]
+
+
+
+ .. py:attribute:: edges
+ :type: List[WorkflowGraphEdge]
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+ This also validates the fields ( type, value, etc ) received in the dictionary.
+
+
+ .. py:method:: from_dict(graph)
+ :classmethod:
+
+
+
.. py:class:: HotkeyPrompt
@@ -605,7 +795,7 @@ Attributes
:type item_attributes_to_include_in_the_result: list
:param explain_predictions: If True, calculates explanations for the forecasted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -747,7 +937,7 @@ Attributes
:type output_columns: list
:param explain_predictions: If True, calculates explanations for the predicted values along with predictions.
:type explain_predictions: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -941,7 +1131,7 @@ Attributes
:param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
:type for_eval: bool
- :param automate_monitoring: If True, creates a monitor to calculate the drift for the batch prediction.
+ :param automate_monitoring: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
:type automate_monitoring: bool
.. py:attribute:: for_eval
@@ -1480,6 +1670,17 @@ Attributes
+.. py:class:: FreshserviceDatasetConfig
+
+
+ Bases: :py:obj:`DatasetConfig`
+
+ Dataset config for Freshservice Application Connector
+
+ .. py:method:: __post_init__()
+
+
+
.. py:class:: _DatasetConfigFactory
@@ -3467,6 +3668,11 @@ Attributes
+ .. py:attribute:: FRESHSERVICE
+ :value: 'FRESHSERVICE'
+
+
+
.. py:class:: PythonFunctionArgumentType
@@ -3707,6 +3913,11 @@ Attributes
+ .. py:attribute:: GEMINI_1_5_PRO
+ :value: 'GEMINI_1_5_PRO'
+
+
+
.. py:attribute:: MIXTRAL_CHAT
:value: 'MIXTRAL_CHAT'
@@ -3983,6 +4194,66 @@ Attributes
+.. py:class:: WorkflowNodeInputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: USER_INPUT
+ :value: 'USER_INPUT'
+
+
+
+ .. py:attribute:: WORKFLOW_VARIABLE
+ :value: 'WORKFLOW_VARIABLE'
+
+
+
+
+.. py:class:: WorkflowNodeOutputType
+
+
+ Bases: :py:obj:`ApiEnum`
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+ .. py:attribute:: INTEGER
+ :value: 'INTEGER'
+
+
+
+ .. py:attribute:: STRING
+ :value: 'STRING'
+
+
+
+ .. py:attribute:: BOOLEAN
+ :value: 'BOOLEAN'
+
+
+
+ .. py:attribute:: FLOAT
+ :value: 'FLOAT'
+
+
+
+ .. py:attribute:: JSON
+ :value: 'JSON'
+
+
+
+ .. py:attribute:: LIST
+ :value: 'LIST'
+
+
+
+
.. py:class:: StdDevThresholdType
@@ -5342,8 +5613,6 @@ Attributes
:type active_labels_column: str
:param document_format: Format of the input documents.
:type document_format: NLPDocumentFormat
- :param include_longformer: Whether to include the longformer model.
- :type include_longformer: bool
:param minimum_bounding_box_overlap_ratio: Tokens are considered to belong to the annotation if the user bounding box is provided and the ratio of (token_bounding_box ∩ annotation_bounding_box) / token_bounding_area is greater than the provided value.
:type minimum_bounding_box_overlap_ratio: float
:param save_predicted_pdf: Whether to save predicted PDF documents
@@ -5425,8 +5694,6 @@ Attributes
:type search_chunk_size: int
:param chunk_overlap_fraction: Overlap in chunks while indexing the documents.
:type chunk_overlap_fraction: float
- :param test_split: Percent of dataset to use for test data. We support using a range between 5 ( i.e. 5% ) to 20 ( i.e. 20% ) of your dataset.
- :type test_split: int
:param index_fraction: Fraction of the chunk to use for indexing.
:type index_fraction: float
@@ -6092,52 +6359,10 @@ Attributes
Training config for the CUSTOM_ALGORITHM problem type
- :param train_function_name: The name of the train function.
- :type train_function_name: str
- :param predict_many_function_name: The name of the predict many function.
- :type predict_many_function_name: str
- :param training_input_tables: List of tables to use for training.
- :type training_input_tables: List[str]
- :param predict_function_name: Optional name of the predict function if the predict many function is not given.
- :type predict_function_name: str
- :param train_module_name: The name of the train module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type train_module_name: str
- :param predict_module_name: The name of the predict module - only relevant if model is being uploaded from a zip file or github repositoty.
- :type predict_module_name: str
- :param test_split: Percent of dataset to use for test data. We support using a range between 6% to 20% of your dataset to use as test data.
- :type test_split: int
-
- .. py:attribute:: train_function_name
- :type: str
-
-
+ :param timeout_minutes: Timeout for the model training in minutes.
+ :type timeout_minutes: int
- .. py:attribute:: predict_many_function_name
- :type: str
-
-
-
- .. py:attribute:: training_input_tables
- :type: List[str]
-
-
-
- .. py:attribute:: predict_function_name
- :type: str
-
-
-
- .. py:attribute:: train_module_name
- :type: str
-
-
-
- .. py:attribute:: predict_module_name
- :type: str
-
-
-
- .. py:attribute:: test_split
+ .. py:attribute:: timeout_minutes
:type: int
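+
+   A minimal sketch of supplying the new timeout field; the config class name (``CustomAlgorithmTrainingConfig``) is an assumption, since this hunk does not show it:
+
+   .. code-block:: python
+
+      from abacusai.api_class import CustomAlgorithmTrainingConfig  # assumed name
+
+      training_config = CustomAlgorithmTrainingConfig(timeout_minutes=120)
+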
@@ -6379,6 +6604,80 @@ Attributes
+.. py:class:: RestrictFeatureMappings
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Restrict Feature Mappings for Monitor Filtering
+
+ :param feature_name: The name of the feature to restrict the monitor to.
+ :type feature_name: str
+ :param restricted_feature_values: The values of the feature to restrict the monitor to.
+ :type restricted_feature_values: list
+
+ .. py:attribute:: feature_name
+ :type: str
+
+
+
+ .. py:attribute:: restricted_feature_values
+ :type: list
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+
+.. py:class:: MonitorFilteringConfig
+
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+ Monitor Filtering Configuration
+
+   :param start_time: The start time of the prediction time column
+   :type start_time: str
+   :param end_time: The end time of the prediction time column
+   :type end_time: str
+   :param restrict_feature_mappings: The feature mappings to restrict the monitor to.
+   :type restrict_feature_mappings: List[RestrictFeatureMappings]
+ :param target_class: The target class to restrict the monitor to.
+ :type target_class: str
+
+ .. py:attribute:: start_time
+ :type: str
+
+
+
+ .. py:attribute:: end_time
+ :type: str
+
+
+
+ .. py:attribute:: restrict_feature_mappings
+ :type: List[RestrictFeatureMappings]
+
+
+
+ .. py:attribute:: target_class
+ :type: str
+
+
+
+ .. py:method:: to_dict()
+
+ Standardizes converting an ApiClass to dictionary.
+ Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+
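+   A sketch of a filtering config; the timestamp format and feature values are illustrative assumptions:
+
+   .. code-block:: python
+
+      from abacusai.api_class import MonitorFilteringConfig, RestrictFeatureMappings
+
+      config = MonitorFilteringConfig(
+          start_time='2024-01-01 00:00:00',  # assumed timestamp format
+          end_time='2024-03-31 23:59:59',
+          restrict_feature_mappings=[
+              RestrictFeatureMappings(
+                  feature_name='country',
+                  restricted_feature_values=['US', 'CA'],
+              ),
+          ],
+          target_class='fraud',
+      )
+
+      payload = config.to_dict()  # keys converted to camel case
+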
.. py:class:: AlertConditionConfig
@@ -7809,7 +8108,7 @@ Attributes
.. py:method:: _get_doc_retriever_deployment_info(document_retriever_id)
- .. py:method:: get_matching_documents(document_retriever_id, query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: get_matching_documents(document_retriever_id, query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Look up document retrievers and return the matching documents from the document retriever deployed with the given query.
@@ -7838,6 +8137,8 @@ Attributes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score lower than the value specified.
:type min_score: float
+ :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace- and case-insensitive.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
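+
+ A sketch of a lookup that pins results to a phrase, using the new ``required_phrases`` argument; the id and query are placeholders:
+
+ .. code-block:: python
+
+    results = client.get_matching_documents(
+        document_retriever_id='DOC_RETRIEVER_ID',  # placeholder
+        query='how do I rotate api keys',
+        limit=5,
+        required_phrases=['api key'],  # each result must contain at least one phrase
+    )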
@@ -8346,7 +8647,7 @@ Attributes
:type use_original_csv_names: bool
:param python_function_name: Name of Python Function that contains the source code and function arguments.
:type python_function_name: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of Python function arguments.
:type python_function_bindings: list
:param use_gpu: Whether the feature group needs a GPU or not. Defaults to CPU otherwise.
:type use_gpu: bool
@@ -9015,11 +9316,11 @@ Attributes
:param feature_group_id: The unique ID associated with the feature group.
:type feature_group_id: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of Python function arguments.
:type python_function_bindings: list
- .. py:method:: update_feature_group_python_function(feature_group_id, python_function_name, python_function_bindings = [])
+ .. py:method:: update_feature_group_python_function(feature_group_id, python_function_name, python_function_bindings = None, cpu_size = None, memory = None, use_gpu = None)
Updates an existing Feature Group's python function from a user-provided Python function. If a list of feature groups is supplied within the python function
@@ -9031,8 +9332,14 @@ Attributes
:type feature_group_id: str
:param python_function_name: The name of the python function to be associated with the feature group.
:type python_function_name: str
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of Python function arguments.
:type python_function_bindings: list
+ :param cpu_size: Size of the CPU for the feature group python function.
+ :type cpu_size: str
+ :param memory: Memory (in GB) for the feature group python function.
+ :type memory: int
+ :param use_gpu: Whether the feature group needs a GPU or not. Defaults to CPU otherwise.
+ :type use_gpu: bool
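+
+ A sketch of the updated call with the new resource arguments; the ids, names, and size label are illustrative placeholders:
+
+ .. code-block:: python
+
+    client.update_feature_group_python_function(
+        feature_group_id='FEATURE_GROUP_ID',      # placeholder
+        python_function_name='transform_events',  # placeholder
+        cpu_size='MEDIUM',                        # assumed size label
+        memory=16,                                # GB
+        use_gpu=False,
+    )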
.. py:method:: update_feature_group_sql_definition(feature_group_id, sql)
@@ -9081,7 +9388,7 @@ Attributes
:type package_requirements: list
:param use_original_csv_names: If set to `True`, feature group uses the original column names for input feature groups from CSV datasets.
:type use_original_csv_names: bool
- :param python_function_bindings: List of arguments to be supplied to the function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
+ :param python_function_bindings: List of PythonFunctionArgument objects that represent the bindings for the Python function.
:type python_function_bindings: list
:param use_gpu: Whether the feature group needs a GPU or not. Defaults to CPU otherwise.
:type use_gpu: bool
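+
+ A sketch of the binding objects, assuming a ``PythonFunctionArgument`` class with ``name``/``variable_type``/``value`` fields (the old docstring above shows the same keys in dict form; the enum member is grounded there too):
+
+ .. code-block:: python
+
+    from abacusai.api_class import PythonFunctionArgument, PythonFunctionArgumentType
+
+    python_function_bindings = [
+        PythonFunctionArgument(
+            name='input_fg',                                         # function parameter name
+            variable_type=PythonFunctionArgumentType.FEATURE_GROUP,
+            value='events_feature_group',                            # feature group table name
+        ),
+    ]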
@@ -9897,7 +10204,7 @@ Attributes
:type name: str
- .. py:method:: update_python_model(model_id, function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None)
+ .. py:method:: update_python_model(model_id, function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None, training_config = None)
Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.
@@ -9928,6 +10235,8 @@ Attributes
:type use_gpu: bool
:param is_thread_safe: Whether this model is thread safe
:type is_thread_safe: bool
+ :param training_config: The training config used to train this model.
+ :type training_config: TrainingConfig
:returns: The updated model.
:rtype: Model
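+
+ A sketch of passing the new ``training_config`` argument; the model id is a placeholder, and reusing the custom-algorithm config class here is an assumption:
+
+ .. code-block:: python
+
+    from abacusai.api_class import CustomAlgorithmTrainingConfig  # assumed name
+
+    model = client.update_python_model(
+        model_id='MODEL_ID',  # placeholder
+        training_config=CustomAlgorithmTrainingConfig(timeout_minutes=120),
+    )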
@@ -11527,7 +11836,7 @@ Attributes
:type include_all_assignments: bool
- .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None)
+ .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)
Get alternative positive assignments for the given query. Optimal assignments are ignored and the alternative assignments are returned instead.
@@ -11537,10 +11846,12 @@ Attributes
:type deployment_id: str
:param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
:type query_data: dict
- :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
+ :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
:type add_constraints: list
:param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
:type solve_time_limit_seconds: float
+ :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
+ :type best_alternate_only: bool
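+
+ A sketch of requesting only the best alternate under a single constraint; the query keys and values are illustrative:
+
+ .. code-block:: python
+
+    alternates = client.get_alternative_assignments(
+        deployment_token='DEPLOYMENT_TOKEN',  # placeholder
+        deployment_id='DEPLOYMENT_ID',        # placeholder
+        query_data={'driver_id': 'd_123'},
+        add_constraints=[{
+            'query': {'route': ['r1', 'r2']},  # same format as query_data
+            'operator': '<=',
+            'constant': 1,
+            # 'coefficient_column' is optional and defaults to 1
+        }],
+        solve_time_limit_seconds=30.0,
+        best_alternate_only=True,
+    )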
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
@@ -11739,7 +12050,7 @@ Attributes
:type doc_infos: list
- .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Look up document retrievers and return the matching documents from the document retriever deployed with the given query.
@@ -11770,6 +12081,8 @@ Attributes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score less than the value specified.
:type min_score: float
+ :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace- and case-insensitive.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
@@ -12072,19 +12385,6 @@ Attributes
:type primary_key: str
- .. py:method:: add_feature_group_document(feature_group_id, document)
-
- Adds a document to the feature group.
-
- :param feature_group_id: The unique ID associated with the feature group.
- :type feature_group_id: str
- :param document: The multipart/form-data of the document to add to the feature group.
- :type document: io.TextIOBase
-
- :returns: The feature group document that was added.
- :rtype: FeatureGroupDocument
-
-
.. py:method:: describe_feature_group_row_process_by_key(deployment_id, primary_key_value)
Gets the feature group row process.
@@ -12332,7 +12632,7 @@ Attributes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
@@ -12369,7 +12669,7 @@ Attributes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
@@ -13056,7 +13356,7 @@ Attributes
:type external_application_id: str
- .. py:method:: create_agent(project_id, function_source_code, agent_function_name, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: create_agent(project_id, function_source_code = None, agent_function_name = None, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Creates a new AI agent.
@@ -13082,12 +13382,14 @@ Attributes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The new agent
:rtype: Agent
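+
+ A sketch of creating a graph-driven agent with the new ``workflow_graph`` argument; the project id is a placeholder and ``graph`` is a ``WorkflowGraph`` assembled as in the earlier example:
+
+ .. code-block:: python
+
+    agent = client.create_agent(
+        project_id='PROJECT_ID',  # placeholder
+        name='Support Agent',
+        workflow_graph=graph,     # replaces the source-code args for graph-based agents (assumption)
+    )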
- .. py:method:: update_agent(model_id, function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: update_agent(model_id, function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Updates an existing AI Agent using user-provided Python code. A new version of the agent will be created and published.
@@ -13109,6 +13411,8 @@ Attributes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The updated agent
:rtype: Agent
@@ -14916,17 +15220,6 @@ Attributes
:rtype: list[FeatureGroupRow]
- .. py:method:: list_pending_feature_group_documents(feature_group_id)
-
- Lists all pending documents added to feature group.
-
- :param feature_group_id: The unique ID associated with the feature group.
- :type feature_group_id: str
-
- :returns: A list of pending feature group documents.
- :rtype: list[FeatureGroupDocument]
-
-
.. py:method:: describe_python_function(name)
Describe a Python Function.
@@ -15369,6 +15662,32 @@ Attributes
:rtype: list[OrganizationSearchResult]
+ .. py:method:: list_agents(project_id)
+
+ Retrieves the list of agents in the specified project.
+
+ :param project_id: The unique identifier associated with the project.
+ :type project_id: str
+
+ :returns: A list of agents in the project.
+ :rtype: list[Agent]
+
+
+ .. py:method:: list_agent_versions(agent_id, limit = 100, start_after_version = None)
+
+ List all versions of an agent.
+
+ :param agent_id: The unique identifier associated with the agent.
+ :type agent_id: str
+ :param limit: If provided, limits the number of agent versions returned.
+ :type limit: int
+ :param start_after_version: Unique string identifier of the version after which the list starts.
+ :type start_after_version: str
+
+ :returns: An array of Agent versions.
+ :rtype: list[AgentVersion]
+
+
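+ A sketch of enumerating agents and their versions with the two new methods; the project id is a placeholder:
+
+ .. code-block:: python
+
+    agents = client.list_agents(project_id='PROJECT_ID')
+    for agent in agents:
+        versions = client.list_agent_versions(agent_id=agent.agent_id, limit=10)
+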
.. py:method:: list_document_retrievers(project_id, limit = 100, start_after_id = None)
List all the document retrievers.
@@ -16071,7 +16390,7 @@ Attributes
:type include_all_assignments: bool
- .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None)
+ .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)
Get alternative positive assignments for the given query. Optimal assignments are ignored and the alternative assignments are returned instead.
@@ -16081,10 +16400,12 @@ Attributes
:type deployment_id: str
:param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
:type query_data: dict
- :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
+ :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
:type add_constraints: list
:param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
:type solve_time_limit_seconds: float
+ :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
+ :type best_alternate_only: bool
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
@@ -16283,7 +16604,7 @@ Attributes
:type doc_infos: list
- .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Look up document retrievers and return the matching documents from the document retriever deployed with the given query.
@@ -16314,6 +16635,8 @@ Attributes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score less than the value specified.
:type min_score: float
+ :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace- and case-insensitive.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
@@ -16435,7 +16758,7 @@ Attributes
.. py:data:: __version__
- :value: '1.2.1'
+ :value: '1.2.2'
diff --git a/docs/_sources/autoapi/abacusai/messaging_connector_response/index.rst.txt b/docs/_sources/autoapi/abacusai/messaging_connector_response/index.rst.txt
index 548584e73..276f42311 100644
--- a/docs/_sources/autoapi/abacusai/messaging_connector_response/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/messaging_connector_response/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: MessagingConnectorResponse(client, welcomeMessage=None, defaultMessage=None, disclaimer=None, messagingBotName=None, useDefaultLabel=None, initAckReq=None)
+.. py:class:: MessagingConnectorResponse(client, welcomeMessage=None, defaultMessage=None, disclaimer=None, messagingBotName=None, useDefaultLabel=None, initAckReq=None, defaultLabels=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -38,6 +38,8 @@ Classes
:type useDefaultLabel: bool
:param initAckReq: Set to true if the initial Acknowledgment for the query is required by the user
:type initAckReq: bool
+ :param defaultLabels: Dictionary of default labels, if the user-specified labels aren't set
+ :type defaultLabels: dict
.. py:method:: __repr__()
diff --git a/docs/_sources/autoapi/abacusai/model/index.rst.txt b/docs/_sources/autoapi/abacusai/model/index.rst.txt
index a1dc074f1..9df4204b5 100644
--- a/docs/_sources/autoapi/abacusai/model/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/model/index.rst.txt
@@ -144,7 +144,7 @@ Classes
:type name: str
- .. py:method:: update_python(function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None)
+ .. py:method:: update_python(function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None, training_config = None)
Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.
@@ -173,6 +173,8 @@ Classes
:type use_gpu: bool
:param is_thread_safe: Whether this model is thread safe
:type is_thread_safe: bool
+ :param training_config: The training config used to train this model.
+ :type training_config: TrainingConfig
:returns: The updated model.
:rtype: Model
@@ -370,7 +372,7 @@ Classes
:rtype: ModelTrainingTypeForDeployment
- .. py:method:: update_agent(function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: update_agent(function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Updates an existing AI Agent using user-provided Python code. A new version of the agent will be created and published.
@@ -390,6 +392,8 @@ Classes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The updated agent
:rtype: Agent
diff --git a/docs/_sources/autoapi/abacusai/model_version/index.rst.txt b/docs/_sources/autoapi/abacusai/model_version/index.rst.txt
index 86306be38..bfedbe242 100644
--- a/docs/_sources/autoapi/abacusai/model_version/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/model_version/index.rst.txt
@@ -17,7 +17,7 @@ Classes
-.. py:class:: ModelVersion(client, modelVersion=None, status=None, modelId=None, modelPredictionConfig=None, trainingStartedAt=None, trainingCompletedAt=None, featureGroupVersions=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, cpuSize=None, memory=None, automlComplete=None, trainingFeatureGroupIds=None, trainingDocumentRetrieverVersions=None, documentRetrieverMappings=None, bestAlgorithm=None, defaultAlgorithm=None, featureAnalysisStatus=None, dataClusterInfo=None, customAlgorithmConfigs=None, trainedModelTypes=None, useGpu=None, partialComplete=None, modelFeatureGroupSchemaMappings=None, codeSource={}, modelConfig={}, deployableAlgorithms={})
+.. py:class:: ModelVersion(client, modelVersion=None, status=None, modelId=None, modelPredictionConfig=None, trainingStartedAt=None, trainingCompletedAt=None, featureGroupVersions=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, cpuSize=None, memory=None, automlComplete=None, trainingFeatureGroupIds=None, trainingDocumentRetrieverVersions=None, documentRetrieverMappings=None, bestAlgorithm=None, defaultAlgorithm=None, featureAnalysisStatus=None, dataClusterInfo=None, customAlgorithmConfigs=None, trainedModelTypes=None, useGpu=None, partialComplete=None, modelFeatureGroupSchemaMappings=None, trainingConfigUpdated=None, codeSource={}, modelConfig={}, deployableAlgorithms={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -76,6 +76,8 @@ Classes
:type partialComplete: bool
:param modelFeatureGroupSchemaMappings: mapping of feature group to schema version
:type modelFeatureGroupSchemaMappings: dict
+ :param trainingConfigUpdated: If the training config has been updated since the instance was created.
+ :type trainingConfigUpdated: bool
:param codeSource: If a python model, information on where the source code is located.
:type codeSource: CodeSource
diff --git a/docs/_sources/autoapi/abacusai/pipeline/index.rst.txt b/docs/_sources/autoapi/abacusai/pipeline/index.rst.txt
index f01680264..76f0695ff 100644
--- a/docs/_sources/autoapi/abacusai/pipeline/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/pipeline/index.rst.txt
@@ -155,7 +155,7 @@ Classes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
diff --git a/docs/_sources/autoapi/abacusai/pipeline_step/index.rst.txt b/docs/_sources/autoapi/abacusai/pipeline_step/index.rst.txt
index 0b2e9abba..f55c4f4da 100644
--- a/docs/_sources/autoapi/abacusai/pipeline_step/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/pipeline_step/index.rst.txt
@@ -84,7 +84,7 @@ Classes
:type source_code: str
:param step_input_mappings: List of Python function arguments.
:type step_input_mappings: list
- :param output_variable_mappings: List of Python function ouputs.
+ :param output_variable_mappings: List of Python function outputs.
:type output_variable_mappings: list
:param step_dependencies: List of step names this step depends on.
:type step_dependencies: list
diff --git a/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt b/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
index 229367ec3..b269145d0 100644
--- a/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
@@ -651,7 +651,7 @@ Classes
:type include_all_assignments: bool
- .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None)
+ .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)
Get alternative positive assignments for the given query. Optimal assignments are ignored and the alternative assignments are returned instead.
@@ -661,10 +661,12 @@ Classes
:type deployment_id: str
:param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly 2. A list of values, which matches any element in the list 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range
:type query_data: dict
- :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignments involved in the constraint. The format is same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value.
+ :param add_constraints: List of constraints dict to apply to the query. The constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint. The format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
:type add_constraints: list
:param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
:type solve_time_limit_seconds: float
+ :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
+ :type best_alternate_only: bool
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
@@ -863,7 +865,7 @@ Classes
:type doc_infos: list
- .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None)
+ .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None)
Look up document retrievers and return the matching documents from the document retriever deployed with the given query.
@@ -894,6 +896,8 @@ Classes
:type score_multiplier_column: str
:param min_score: If provided, will filter out the results with score less than the value specified.
:type min_score: float
+ :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace- and case-insensitive.
+ :type required_phrases: list
:returns: The relevant documentation results found from the document retriever.
:rtype: list[DocumentRetrieverLookupResult]
diff --git a/docs/_sources/autoapi/abacusai/project/index.rst.txt b/docs/_sources/autoapi/abacusai/project/index.rst.txt
index 4d70d3ae4..c74777851 100644
--- a/docs/_sources/autoapi/abacusai/project/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/project/index.rst.txt
@@ -653,7 +653,7 @@ Classes
:rtype: ChatSession
- .. py:method:: create_agent(function_source_code, agent_function_name, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None)
+ .. py:method:: create_agent(function_source_code = None, agent_function_name = None, name = None, memory = None, package_requirements = None, description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None)
Creates a new AI agent.
@@ -677,11 +677,24 @@ Classes
:type agent_input_schema: dict
:param agent_output_schema: The schema of the output data for the agent, which conforms to the react-json-schema-form standard.
:type agent_output_schema: dict
+ :param workflow_graph: The workflow graph for the agent.
+ :type workflow_graph: WorkflowGraph
:returns: The new agent
:rtype: Agent
+ .. py:method:: list_agents()
+
+ Retrieves the list of agents in this project.
+
+ :returns: A list of agents in the project.
+ :rtype: list[Agent]
+
+
.. py:method:: create_document_retriever(name, feature_group_id, document_retriever_config = None)
Returns a document retriever that stores embeddings for document chunks in a feature group.
diff --git a/docs/autoapi/abacusai/abacus_api/index.html b/docs/autoapi/abacusai/abacus_api/index.html
index 6eedf75a3..4ec6ee0e1 100644
--- a/docs/autoapi/abacusai/abacus_api/index.html
+++ b/docs/autoapi/abacusai/abacus_api/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/agent/index.html b/docs/autoapi/abacusai/agent/index.html
index db05400a9..df846033d 100644
--- a/docs/autoapi/abacusai/agent/index.html
+++ b/docs/autoapi/abacusai/agent/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
@@ -300,7 +301,7 @@ Classes
Bases: abacusai.return_class.AbstractApiClass
An AI agent.
@@ -374,6 +375,26 @@ Classes
wait_for_publish(timeout=None)
diff --git a/docs/autoapi/abacusai/agent_data_document_info/index.html b/docs/autoapi/abacusai/agent_data_document_info/index.html
index c4b65b47e..e831a24b3 100644
--- a/docs/autoapi/abacusai/agent_data_document_info/index.html
+++ b/docs/autoapi/abacusai/agent_data_document_info/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/agent_data_execution_result/index.html b/docs/autoapi/abacusai/agent_data_execution_result/index.html
index 443a700d2..570e6f612 100644
--- a/docs/autoapi/abacusai/agent_data_execution_result/index.html
+++ b/docs/autoapi/abacusai/agent_data_execution_result/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/agent_data_upload_result/index.html b/docs/autoapi/abacusai/agent_data_upload_result/index.html
index 854f5eb77..7c3a2cfb8 100644
--- a/docs/autoapi/abacusai/agent_data_upload_result/index.html
+++ b/docs/autoapi/abacusai/agent_data_upload_result/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/agent_version/index.html b/docs/autoapi/abacusai/agent_version/index.html
index c49a368a4..3fc549b06 100644
--- a/docs/autoapi/abacusai/agent_version/index.html
+++ b/docs/autoapi/abacusai/agent_version/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
@@ -300,7 +301,7 @@ Classes
Bases: abacusai.return_class.AbstractApiClass
A version of an AI agent.
diff --git a/docs/autoapi/abacusai/ai_building_task/index.html b/docs/autoapi/abacusai/ai_building_task/index.html
index 746df37ed..4d01a4f45 100644
--- a/docs/autoapi/abacusai/ai_building_task/index.html
+++ b/docs/autoapi/abacusai/ai_building_task/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/algorithm/index.html b/docs/autoapi/abacusai/algorithm/index.html
index df225bffe..824c87ed4 100644
--- a/docs/autoapi/abacusai/algorithm/index.html
+++ b/docs/autoapi/abacusai/algorithm/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/annotation/index.html b/docs/autoapi/abacusai/annotation/index.html
index 0d5e7afb7..78f405165 100644
--- a/docs/autoapi/abacusai/annotation/index.html
+++ b/docs/autoapi/abacusai/annotation/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/annotation_config/index.html b/docs/autoapi/abacusai/annotation_config/index.html
index e56521847..3ee6d73ef 100644
--- a/docs/autoapi/abacusai/annotation_config/index.html
+++ b/docs/autoapi/abacusai/annotation_config/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/annotation_document/index.html b/docs/autoapi/abacusai/annotation_document/index.html
index 0d7cc6144..14561d889 100644
--- a/docs/autoapi/abacusai/annotation_document/index.html
+++ b/docs/autoapi/abacusai/annotation_document/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/annotation_entry/index.html b/docs/autoapi/abacusai/annotation_entry/index.html
index cbf945400..c2579389f 100644
--- a/docs/autoapi/abacusai/annotation_entry/index.html
+++ b/docs/autoapi/abacusai/annotation_entry/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/annotations_status/index.html b/docs/autoapi/abacusai/annotations_status/index.html
index 51f1e9a9e..7bef7cf08 100644
--- a/docs/autoapi/abacusai/annotations_status/index.html
+++ b/docs/autoapi/abacusai/annotations_status/index.html
@@ -144,6 +144,7 @@
abacusai.feature_group_version
abacusai.feature_importance
abacusai.feature_mapping
+abacusai.feature_performance_analysis
abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
diff --git a/docs/autoapi/abacusai/api_class/ai_agents/index.html b/docs/autoapi/abacusai/api_class/ai_agents/index.html
index dddd04764..bb4976747 100644
--- a/docs/autoapi/abacusai/api_class/ai_agents/index.html
+++ b/docs/autoapi/abacusai/api_class/ai_agents/index.html
@@ -96,6 +96,21 @@ ClassesFieldDescriptor
|
Configs for vector store indexing. |
+WorkflowNodeInputMapping
|
+A mapping of input to a workflow node. |
+
+WorkflowNodeOutputMapping
|
+A mapping of output to a workflow node. |
+
+WorkflowGraphNode
|
+A node in an Agent workflow graph. |
+
+WorkflowGraphEdge
|
+An edge in an Agent workflow graph. |
+
+WorkflowGraph
|
+An Agent workflow graph. |
+
@@ -135,6 +150,203 @@ Classes
+
+class abacusai.api_class.ai_agents.WorkflowNodeOutputMapping
+   Bases: abacusai.api_class.abstract.ApiClass
+
+   A mapping of output to a workflow node.
+
+   Parameters:
+      name (str)
+      variable_type (abacusai.api_class.enums.WorkflowNodeOutputType)
+
+   name: str
+
+   variable_type: abacusai.api_class.enums.WorkflowNodeOutputType
+
+   to_dict()
+      Standardizes converting an ApiClass to dictionary.
+      Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+
+class abacusai.api_class.ai_agents.WorkflowGraphNode(name, input_mappings, output_mappings, function=None, function_name=None, source_code=None, input_schema=None, output_schema=None, package_requirements=None)
+   Bases: abacusai.api_class.abstract.ApiClass
+
+   A node in an Agent workflow graph.
+
+   Parameters:
+      name (str) – Display name of the workflow node.
+      input_mappings (List[WorkflowNodeInputMapping]) – List of input mappings for the node.
+      output_mappings (List[WorkflowNodeOutputMapping]) – List of output mappings for the node.
+      function (callable) – The callable node function reference if available.
+      function_name (str) – The name of the function if available.
+      source_code (str) – The source code of the function if available.
+      input_schema (dict) – The react json schema for the input form if applicable.
+      output_schema (dict) – The react json schema for the output if applicable.
+      package_requirements (list) – List of package requirements for the node.
+
+   to_dict()
+      Standardizes converting an ApiClass to dictionary.
+      Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+   classmethod from_dict(node)
+      Parameters: node (dict)
+
+
+class abacusai.api_class.ai_agents.WorkflowGraphEdge
+   Bases: abacusai.api_class.abstract.ApiClass
+
+   An edge in an Agent workflow graph.
+
+   Parameters:
+      source (str) – The source node of the edge.
+      target (str) – The target node of the edge.
+      details (dict) – Additional details about the edge.
+
+   source: str
+
+   target: str
+
+   details: dict
+
+   to_nx_edge()
+
+
+class abacusai.api_class.ai_agents.WorkflowGraph
+   Bases: abacusai.api_class.abstract.ApiClass
+
+   An Agent workflow graph.
+
+   Parameters:
+      nodes (List[WorkflowGraphNode]) – A list of nodes in the workflow graph.
+      edges (List[WorkflowGraphEdge]) – A list of edges in the workflow graph, where each edge is a tuple of source, target and details.
+
+   nodes: List[WorkflowGraphNode]
+
+   edges: List[WorkflowGraphEdge]
+
+   to_dict()
+      Standardizes converting an ApiClass to dictionary.
+      Keys of response dictionary are converted to camel case.
+      This also validates the fields (type, value, etc.) received in the dictionary.
+
+   classmethod from_dict(graph)
+      Parameters: graph (dict)
+
diff --git a/docs/autoapi/abacusai/api_class/batch_prediction/index.html b/docs/autoapi/abacusai/api_class/batch_prediction/index.html
index 820313ef7..23e84ce4e 100644
--- a/docs/autoapi/abacusai/api_class/batch_prediction/index.html
+++ b/docs/autoapi/abacusai/api_class/batch_prediction/index.html
@@ -170,7 +170,7 @@ Classes
 …) – The number of timestamps to predict in the future. Range: [1, 1000].
item_attributes_to_include_in_the_result (list) – List of columns to include in the prediction output.
explain_predictions (bool) – If True, calculates explanations for the forecasted values along with predictions.
-automate_monitoring (bool) – If True, creates a monitor to calculate the drift for the batch prediction.
+automate_monitoring (bool) – Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
@@ -312,7 +312,7 @@ Classes
 …) – For classification problems specifies the label to which the explanation bounds are applied.
output_columns (list) – A list of column names to include in the prediction result.
explain_predictions (bool) – If True, calculates explanations for the predicted values along with predictions.
-automate_monitoring (bool) – If True, creates a monitor to calculate the drift for the batch prediction.
+automate_monitoring (bool) – Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
@@ -522,7 +522,7 @@ Classes