Notebook knowledge-bases/4_customized-rag-retreive-api-titan-lite-evaluation fails with an ImportError; the error message is below. There appears to be a circular dependency involving ChatResponseAsyncGen in llama_index.llms.
Steps to reproduce:
Run the notebook as-is from a SageMaker Studio Classic notebook instance with the Data Science 3.0 kernel.
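For reference, this is the import cell (cell 7) that triggers the failure; the first two lines of the cell are not shown in the traceback, so only the imports visible there are reproduced here:

```python
from botocore.client import Config
from langchain.llms.bedrock import Bedrock
from llama_index import (
    ServiceContext,
    set_global_service_context,
)
from langchain.embeddings.bedrock import BedrockEmbeddings
from llama_index.embeddings import LangchainEmbedding
```

The failure happens on the `from llama_index import ...` line itself, before any Bedrock call is made.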
ImportError: cannot import name 'ChatResponseAsyncGen' from partially initialized module 'llama_index.llms' (most likely due to a circular import) (/opt/conda/lib/python3.10/site-packages/llama_index/llms/__init__.py)
Full stack trace below:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
Cell In[7], line 5
3 from botocore.client import Config
4 from langchain.llms.bedrock import Bedrock
----> 5 from llama_index import (
6 ServiceContext,
7 set_global_service_context
8 )
9 from langchain.embeddings.bedrock import BedrockEmbeddings
10 from llama_index.embeddings import LangchainEmbedding
File /opt/conda/lib/python3.10/site-packages/llama_index/__init__.py:13
10 from typing import Callable, Optional
12 # import global eval handler
---> 13 from llama_index.callbacks.global_handlers import set_global_handler
14 from llama_index.data_structs.struct_type import IndexStructType
16 # embeddings
File /opt/conda/lib/python3.10/site-packages/llama_index/callbacks/__init__.py:7
5 from .open_inference_callback import OpenInferenceCallbackHandler
6 from .schema import CBEvent, CBEventType, EventPayload
----> 7 from .token_counting import TokenCountingHandler
8 from .utils import trace_method
9 from .wandb_callback import WandbCallbackHandler
File /opt/conda/lib/python3.10/site-packages/llama_index/callbacks/token_counting.py:6
4 from llama_index.callbacks.base_handler import BaseCallbackHandler
5 from llama_index.callbacks.schema import CBEventType, EventPayload
----> 6 from llama_index.utilities.token_counting import TokenCounter
7 from llama_index.utils import get_tokenizer
10 @dataclass
11 class TokenCountingEvent:
File /opt/conda/lib/python3.10/site-packages/llama_index/utilities/token_counting.py:6
1 # Modified from:
2 # https://github.com/nyno-ai/openai-token-counter
4 from typing import Any, Callable, Dict, List, Optional
----> 6 from llama_index.llms import ChatMessage, MessageRole
7 from llama_index.utils import get_tokenizer
10 class TokenCounter:
File /opt/conda/lib/python3.10/site-packages/llama_index/llms/__init__.py:2
1 from llama_index.llms.ai21 import AI21
----> 2 from llama_index.llms.anthropic import Anthropic
3 from llama_index.llms.anyscale import Anyscale
4 from llama_index.llms.azure_openai import AzureOpenAI
File /opt/conda/lib/python3.10/site-packages/llama_index/llms/anthropic/__init__.py:1
----> 1 from llama_index.llms.anthropic.base import Anthropic
3 __all__ = ["Anthropic"]
File /opt/conda/lib/python3.10/site-packages/llama_index/llms/anthropic/base.py:3
1 from typing import Any, Callable, Dict, Optional, Sequence
----> 3 from llama_index.core.base.llms.types import (
4 ChatMessage,
5 ChatResponse,
6 ChatResponseAsyncGen,
7 ChatResponseGen,
8 CompletionResponse,
9 CompletionResponseAsyncGen,
10 CompletionResponseGen,
11 LLMMetadata,
12 MessageRole,
13 )
14 from llama_index.core.bridge.pydantic import Field, PrivateAttr
15 from llama_index.core.callbacks import CallbackManager
File /opt/conda/lib/python3.10/site-packages/llama_index/core/__init__.py:2
1 from llama_index.core.base_query_engine import BaseQueryEngine
----> 2 from llama_index.core.base_retriever import BaseRetriever
4 __all__ = ["BaseRetriever", "BaseQueryEngine"]
File /opt/conda/lib/python3.10/site-packages/llama_index/core/base_retriever.py:7
5 from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
6 from llama_index.schema import NodeWithScore, QueryBundle, QueryType
----> 7 from llama_index.service_context import ServiceContext
10 class BaseRetriever(PromptMixin):
11 """Base retriever."""
File /opt/conda/lib/python3.10/site-packages/llama_index/service_context.py:9
7 from llama_index.callbacks.base import CallbackManager
8 from llama_index.embeddings.base import BaseEmbedding
----> 9 from llama_index.embeddings.utils import EmbedType, resolve_embed_model
10 from llama_index.indices.prompt_helper import PromptHelper
11 from llama_index.llm_predictor import LLMPredictor
File /opt/conda/lib/python3.10/site-packages/llama_index/embeddings/__init__.py:20
18 from llama_index.embeddings.google_palm import GooglePaLMEmbedding
19 from llama_index.embeddings.gradient import GradientEmbedding
---> 20 from llama_index.embeddings.huggingface import (
21 HuggingFaceEmbedding,
22 HuggingFaceInferenceAPIEmbedding,
23 HuggingFaceInferenceAPIEmbeddings,
24 )
25 from llama_index.embeddings.huggingface_optimum import OptimumEmbedding
26 from llama_index.embeddings.huggingface_utils import DEFAULT_HUGGINGFACE_EMBEDDING_MODEL
File /opt/conda/lib/python3.10/site-packages/llama_index/embeddings/huggingface.py:17
11 from llama_index.embeddings.huggingface_utils import (
12 DEFAULT_HUGGINGFACE_EMBEDDING_MODEL,
13 format_query,
14 format_text,
15 )
16 from llama_index.embeddings.pooling import Pooling
---> 17 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
18 from llama_index.utils import get_cache_dir, infer_torch_device
20 if TYPE_CHECKING:
File /opt/conda/lib/python3.10/site-packages/llama_index/llms/huggingface.py:11
6 from llama_index.callbacks import CallbackManager
7 from llama_index.constants import (
8 DEFAULT_CONTEXT_WINDOW,
9 DEFAULT_NUM_OUTPUTS,
10 )
---> 11 from llama_index.llms import ChatResponseAsyncGen, CompletionResponseAsyncGen
12 from llama_index.llms.base import (
13 LLM,
14 ChatMessage,
(...)
22 llm_completion_callback,
23 )
24 from llama_index.llms.custom import CustomLLM
ImportError: cannot import name 'ChatResponseAsyncGen' from partially initialized module 'llama_index.llms' (most likely due to a circular import) (/opt/conda/lib/python3.10/site-packages/llama_index/llms/__init__.py)
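Judging from the module paths in the traceback, the environment appears to mix the legacy monolithic llama_index layout (e.g. llama_index.service_context, llama_index.embeddings.huggingface) with a newer split-out Anthropic integration (llama_index.llms.anthropic importing from llama_index.core.base.llms.types), which would explain the partially initialized llama_index.llms module. A quick way to check which llama-index distributions are actually installed in the kernel is the sketch below; it relies only on the standard library and assumes nothing beyond the distribution names containing "llama".

```python
# Diagnostic sketch (not from the notebook): list every installed
# distribution whose name mentions "llama", to spot a mix of the old
# monolithic llama-index package and newer split-out llama-index-* packages.
import importlib.metadata

for dist in importlib.metadata.distributions():
    name = dist.metadata["Name"] or ""
    if "llama" in name.lower():
        print(f"{name}=={dist.version}")
```

If both the old llama-index package and newer llama-index-core / llama-index-llms-anthropic style packages show up together, reinstalling a single consistent set of versions in a fresh kernel is the likely fix, though that is an assumption rather than a confirmed resolution.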