DRIFT Search
In [1]:
Copied!
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License.
In [2]:
Copied!
import os
from pathlib import Path

import pandas as pd
import tiktoken

from graphrag.config.models.drift_search_config import DRIFTSearchConfig
from graphrag.query.indexer_adapters import (
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_report_embeddings,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.query.llm.oai.chat_openai import ChatOpenAI
from graphrag.query.llm.oai.embedding import OpenAIEmbedding
from graphrag.query.llm.oai.typing import OpenaiApiType
from graphrag.query.structured_search.drift_search.drift_context import (
    DRIFTSearchContextBuilder,
)
from graphrag.query.structured_search.drift_search.search import DRIFTSearch
from graphrag.vector_stores.lancedb import LanceDBVectorStore

# Location of the pre-built index artifacts (parquet tables + LanceDB embeddings)
# produced by the GraphRAG indexing pipeline.
INPUT_DIR = "./inputs/operation dulce"
LANCEDB_URI = f"{INPUT_DIR}/lancedb"

# Parquet table names emitted by the indexing pipeline.
COMMUNITY_REPORT_TABLE = "create_final_community_reports"
ENTITY_TABLE = "create_final_nodes"
ENTITY_EMBEDDING_TABLE = "create_final_entities"
RELATIONSHIP_TABLE = "create_final_relationships"
COVARIATE_TABLE = "create_final_covariates"
TEXT_UNIT_TABLE = "create_final_text_units"

# Community hierarchy level to load entities/reports from
# (presumably higher = more fine-grained communities — see graphrag docs).
COMMUNITY_LEVEL = 2

# read nodes table to get community and degree data
entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")
print(f"Entity df columns: {entity_df.columns}")

# Join node + embedding tables into the Entity model objects used by the search.
entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)

# load description embeddings to an in-memory lancedb vectorstore
# to connect to a remote db, specify url and port values.
description_embedding_store = LanceDBVectorStore(
    collection_name="default-entity-description",
)
description_embedding_store.connect(db_uri=LANCEDB_URI)

# Second store holds full-content embeddings of the community reports,
# consumed later by read_indexer_report_embeddings.
full_content_embedding_store = LanceDBVectorStore(
    collection_name="default-community-full_content",
)
full_content_embedding_store.connect(db_uri=LANCEDB_URI)
print(f"Entity count: {len(entity_df)}")
entity_df.head()

# Relationships (graph edges) between entities.
relationship_df = pd.read_parquet(f"{INPUT_DIR}/{RELATIONSHIP_TABLE}.parquet")
relationships = read_indexer_relationships(relationship_df)
print(f"Relationship count: {len(relationship_df)}")
relationship_df.head()

# Raw text units (source-document chunks) backing the index.
text_unit_df = pd.read_parquet(f"{INPUT_DIR}/{TEXT_UNIT_TABLE}.parquet")
text_units = read_indexer_text_units(text_unit_df)
print(f"Text unit records: {len(text_unit_df)}")
text_unit_df.head()
import os
from pathlib import Path

import pandas as pd
import tiktoken

from graphrag.config.models.drift_search_config import DRIFTSearchConfig
from graphrag.query.indexer_adapters import (
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_report_embeddings,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.query.llm.oai.chat_openai import ChatOpenAI
from graphrag.query.llm.oai.embedding import OpenAIEmbedding
from graphrag.query.llm.oai.typing import OpenaiApiType
from graphrag.query.structured_search.drift_search.drift_context import (
    DRIFTSearchContextBuilder,
)
from graphrag.query.structured_search.drift_search.search import DRIFTSearch
from graphrag.vector_stores.lancedb import LanceDBVectorStore

# Location of the pre-built index artifacts (parquet tables + LanceDB embeddings)
# produced by the GraphRAG indexing pipeline.
INPUT_DIR = "./inputs/operation dulce"
LANCEDB_URI = f"{INPUT_DIR}/lancedb"

# Parquet table names emitted by the indexing pipeline.
COMMUNITY_REPORT_TABLE = "create_final_community_reports"
ENTITY_TABLE = "create_final_nodes"
ENTITY_EMBEDDING_TABLE = "create_final_entities"
RELATIONSHIP_TABLE = "create_final_relationships"
COVARIATE_TABLE = "create_final_covariates"
TEXT_UNIT_TABLE = "create_final_text_units"

# Community hierarchy level to load entities/reports from
# (presumably higher = more fine-grained communities — see graphrag docs).
COMMUNITY_LEVEL = 2

# read nodes table to get community and degree data
entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")
print(f"Entity df columns: {entity_df.columns}")

# Join node + embedding tables into the Entity model objects used by the search.
entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)

# load description embeddings to an in-memory lancedb vectorstore
# to connect to a remote db, specify url and port values.
description_embedding_store = LanceDBVectorStore(
    collection_name="default-entity-description",
)
description_embedding_store.connect(db_uri=LANCEDB_URI)

# Second store holds full-content embeddings of the community reports,
# consumed later by read_indexer_report_embeddings.
full_content_embedding_store = LanceDBVectorStore(
    collection_name="default-community-full_content",
)
full_content_embedding_store.connect(db_uri=LANCEDB_URI)
print(f"Entity count: {len(entity_df)}")
entity_df.head()

# Relationships (graph edges) between entities.
relationship_df = pd.read_parquet(f"{INPUT_DIR}/{RELATIONSHIP_TABLE}.parquet")
relationships = read_indexer_relationships(relationship_df)
print(f"Relationship count: {len(relationship_df)}")
relationship_df.head()

# Raw text units (source-document chunks) backing the index.
text_unit_df = pd.read_parquet(f"{INPUT_DIR}/{TEXT_UNIT_TABLE}.parquet")
text_units = read_indexer_text_units(text_unit_df)
print(f"Text unit records: {len(text_unit_df)}")
text_unit_df.head()
Entity df columns: Index(['id', 'human_readable_id', 'title', 'community', 'level', 'degree', 'x',
'y'],
dtype='object')
Entity count: 888
Relationship count: 812
Text unit records: 38
Out[2]:
| id | human_readable_id | text | n_tokens | document_ids | entity_ids | relationship_ids | covariate_ids | |
|---|---|---|---|---|---|---|---|---|
| 0 | aa55265004ced76e9050ed4b7a45c0496e10faa0eddb8a... | 1 | ../\nJACOB COLLIER: Honestly, I think mastery... | 1200 | [1e0886ae010728d10b2972f66b88608dc82b8645d3085... | [9a062709-56dd-4bf2-8b41-926124b7a6f7, f8c54a6... | [9af066c8-031b-4c52-b93b-b37763f6f0f7, 5b15580... | [f91209d1-0939-452e-b51b-be1763e2a27d, f2274c3... |
| 1 | 7f0fb1d3bf517dc76dffa984eec7a25e851e44ead0df82... | 2 | OMBERG: I grew up and started getting into al... | 1200 | [1e0886ae010728d10b2972f66b88608dc82b8645d3085... | [9a062709-56dd-4bf2-8b41-926124b7a6f7, f8c54a6... | [c2ac3612-3aaf-440c-babd-e21f474e0366, 9aab0b4... | [13c74c18-439b-4419-8427-4ba826503055, 7120179... |
| 2 | 27b739ceeddfa100f7be3cf002fd3a27aea2228f1a02c4... | 3 | , you know, and it’s a very linear pathway too... | 1200 | [1e0886ae010728d10b2972f66b88608dc82b8645d3085... | [9a062709-56dd-4bf2-8b41-926124b7a6f7, f8c54a6... | [9af066c8-031b-4c52-b93b-b37763f6f0f7, 5456bcb... | [a67f1c21-32ab-4eaa-b063-c815e7f3ea9d] |
| 3 | d97017305e234cc51554d653447d73b58441e1ff0f99e4... | 4 | you know, we started taking a lot of parts an... | 1200 | [1e0886ae010728d10b2972f66b88608dc82b8645d3085... | [9a062709-56dd-4bf2-8b41-926124b7a6f7, f8c54a6... | [bac3dd9b-f5c8-4966-9dc3-87f1f5976e36, 4f41be8... | [695ac017-7c10-44ad-a681-3b4c1ae86a87] |
| 4 | 2b6d29f8a74b16ea9a70423bce803a08a5b9ed4e6a946b... | 5 | only so much processing that my mind can do i... | 1200 | [1e0886ae010728d10b2972f66b88608dc82b8645d3085... | [9a062709-56dd-4bf2-8b41-926124b7a6f7, f8c54a6... | [d225ff7b-ca47-4fab-8d9a-4f86111526f8, 3fddb40... | [d8a7ad5c-a170-430e-9f9d-902047371ee2] |
In [3]:
Copied!
# Credentials and model names come from the environment — never hardcode keys.
api_key = os.environ["GRAPHRAG_API_KEY"]
llm_model = os.environ["GRAPHRAG_LLM_MODEL"]
embedding_model = os.environ["GRAPHRAG_EMBEDDING_MODEL"]

# Chat model used by DRIFT search (query expansion / answer generation).
chat_llm = ChatOpenAI(
    api_key=api_key,
    model=llm_model,
    api_type=OpenaiApiType.OpenAI,  # OpenaiApiType.OpenAI or OpenaiApiType.AzureOpenAI
    max_retries=20,
)

# Tokenizer matching the chat model, used for token accounting.
token_encoder = tiktoken.encoding_for_model(llm_model)

# Embedding client used to embed queries for vector-store lookups.
text_embedder = OpenAIEmbedding(
    api_key=api_key,
    api_base=None,
    api_type=OpenaiApiType.OpenAI,
    model=embedding_model,
    deployment_name=embedding_model,
    max_retries=20,
)
# Credentials and model names come from the environment — never hardcode keys.
api_key = os.environ["GRAPHRAG_API_KEY"]
llm_model = os.environ["GRAPHRAG_LLM_MODEL"]
embedding_model = os.environ["GRAPHRAG_EMBEDDING_MODEL"]

# Chat model used by DRIFT search (query expansion / answer generation).
chat_llm = ChatOpenAI(
    api_key=api_key,
    model=llm_model,
    api_type=OpenaiApiType.OpenAI,  # OpenaiApiType.OpenAI or OpenaiApiType.AzureOpenAI
    max_retries=20,
)

# Tokenizer matching the chat model, used for token accounting.
token_encoder = tiktoken.encoding_for_model(llm_model)

# Embedding client used to embed queries for vector-store lookups.
text_embedder = OpenAIEmbedding(
    api_key=api_key,
    api_base=None,
    api_type=OpenaiApiType.OpenAI,
    model=embedding_model,
    deployment_name=embedding_model,
    max_retries=20,
)
In [4]:
Copied!
def read_community_reports(
    input_dir: str,
    community_report_table: str = COMMUNITY_REPORT_TABLE,
) -> pd.DataFrame:
    """Read the community-reports parquet table from the input directory.

    Note: this only loads the table — it does not compute or persist
    embeddings (those are read from the LanceDB store below).

    Args:
        input_dir: Directory containing the indexing-pipeline output.
        community_report_table: Parquet table name (without extension).

    Returns:
        The community reports as a pandas DataFrame.
    """
    input_path = Path(input_dir) / f"{community_report_table}.parquet"
    return pd.read_parquet(input_path)


report_df = read_community_reports(INPUT_DIR)

# Build report model objects; content_embedding_col names the column that will
# hold each report's full-content embedding.
reports = read_indexer_reports(
    report_df,
    entity_df,
    COMMUNITY_LEVEL,
    content_embedding_col="full_content_embeddings",
)

# Attach the precomputed full-content embeddings from the LanceDB store.
read_indexer_report_embeddings(reports, full_content_embedding_store)
def read_community_reports(
    input_dir: str,
    community_report_table: str = COMMUNITY_REPORT_TABLE,
) -> pd.DataFrame:
    """Read the community-reports parquet table from the input directory.

    Note: this only loads the table — it does not compute or persist
    embeddings (those are read from the LanceDB store below).

    Args:
        input_dir: Directory containing the indexing-pipeline output.
        community_report_table: Parquet table name (without extension).

    Returns:
        The community reports as a pandas DataFrame.
    """
    input_path = Path(input_dir) / f"{community_report_table}.parquet"
    return pd.read_parquet(input_path)


report_df = read_community_reports(INPUT_DIR)

# Build report model objects; content_embedding_col names the column that will
# hold each report's full-content embedding.
reports = read_indexer_reports(
    report_df,
    entity_df,
    COMMUNITY_LEVEL,
    content_embedding_col="full_content_embeddings",
)

# Attach the precomputed full-content embeddings from the LanceDB store.
read_indexer_report_embeddings(reports, full_content_embedding_store)
In [5]:
Copied!
# DRIFT hyperparameters — kept deliberately small here for a quick demo run.
# See DRIFTSearchConfig for the full parameter documentation.
drift_params = DRIFTSearchConfig(
    temperature=0,  # deterministic LLM sampling
    max_tokens=12_000,
    primer_folds=1,
    drift_k_followups=3,  # follow-up questions expanded per step
    n_depth=3,  # search depth
    n=1,
)

# Context builder wires together everything loaded above: entities,
# relationships, reports (with embeddings), text units, and the two models.
context_builder = DRIFTSearchContextBuilder(
    chat_llm=chat_llm,
    text_embedder=text_embedder,
    entities=entities,
    relationships=relationships,
    reports=reports,
    entity_text_embeddings=description_embedding_store,
    text_units=text_units,
    token_encoder=token_encoder,
    config=drift_params,
)

# The search engine itself; queried via `await search.asearch(...)` below.
search = DRIFTSearch(
    llm=chat_llm, context_builder=context_builder, token_encoder=token_encoder
)
# DRIFT hyperparameters — kept deliberately small here for a quick demo run.
# See DRIFTSearchConfig for the full parameter documentation.
drift_params = DRIFTSearchConfig(
    temperature=0,  # deterministic LLM sampling
    max_tokens=12_000,
    primer_folds=1,
    drift_k_followups=3,  # follow-up questions expanded per step
    n_depth=3,  # search depth
    n=1,
)

# Context builder wires together everything loaded above: entities,
# relationships, reports (with embeddings), text units, and the two models.
context_builder = DRIFTSearchContextBuilder(
    chat_llm=chat_llm,
    text_embedder=text_embedder,
    entities=entities,
    relationships=relationships,
    reports=reports,
    entity_text_embeddings=description_embedding_store,
    text_units=text_units,
    token_encoder=token_encoder,
    config=drift_params,
)

# The search engine itself; queried via `await search.asearch(...)` below.
search = DRIFTSearch(
    llm=chat_llm, context_builder=context_builder, token_encoder=token_encoder
)
In [6]:
Copied!
# Run the DRIFT search. Top-level `await` works because Jupyter runs cells
# inside an asyncio event loop. NOTE(review): the saved output below shows this
# call failed with a 429 RateLimitError (insufficient OpenAI quota), so `resp`
# was never bound in that run.
resp = await search.asearch("Who is agent Mercer?")
resp = await search.asearch("Who is agent Mercer?")
--------------------------------------------------------------------------- RateLimitError Traceback (most recent call last) Cell In[6], line 1 ----> 1 resp = await search.asearch("Who is agent Mercer?") File ~/work/graphrag/graphrag/graphrag/query/structured_search/drift_search/search.py:203, in DRIFTSearch.asearch(self, query, conversation_history, reduce, **kwargs) 200 # Check if query state is empty 201 if not self.query_state.graph: 202 # Prime the search with the primer --> 203 primer_context, token_ct = self.context_builder.build_context(query) 204 llm_calls["build_context"] = token_ct["llm_calls"] 205 prompt_tokens["build_context"] = token_ct["prompt_tokens"] File ~/work/graphrag/graphrag/graphrag/query/structured_search/drift_search/drift_context.py:200, in DRIFTSearchContextBuilder.build_context(self, query, **kwargs) 191 raise ValueError(missing_reports_error) 193 query_processor = PrimerQueryProcessor( 194 chat_llm=self.chat_llm, 195 text_embedder=self.text_embedder, 196 token_encoder=self.token_encoder, 197 reports=self.reports, 198 ) --> 200 query_embedding, token_ct = query_processor(query) 202 report_df = self.convert_reports_to_df(self.reports) 204 # Check compatibility between query embedding and document embeddings File ~/work/graphrag/graphrag/graphrag/query/structured_search/drift_search/primer.py:97, in PrimerQueryProcessor.__call__(self, query) 86 def __call__(self, query: str) -> tuple[list[float], dict[str, int]]: 87 """ 88 Call method to process the query, expand it, and embed the result. 89 (...) 95 tuple[list[float], int]: List of embeddings for the expanded query and the token count. 
96 """ ---> 97 hyde_query, token_ct = self.expand_query(query) 98 log.info("Expanded query: %s", hyde_query) 99 return self.text_embedder.embed(hyde_query), token_ct File ~/work/graphrag/graphrag/graphrag/query/structured_search/drift_search/primer.py:73, in PrimerQueryProcessor.expand_query(self, query) 66 prompt = f"""Create a hypothetical answer to the following query: {query}\n\n 67 Format it to follow the structure of the template below:\n\n 68 {template}\n" 69 Ensure that the hypothetical answer does not reference new named entities that are not present in the original query.""" 71 messages = [{"role": "user", "content": prompt}] ---> 73 text = self.chat_llm.generate(messages) 74 prompt_tokens = num_tokens(prompt, self.token_encoder) 75 output_tokens = num_tokens(text, self.token_encoder) File ~/work/graphrag/graphrag/graphrag/query/llm/oai/chat_openai.py:78, in ChatOpenAI.generate(self, messages, streaming, callbacks, **kwargs) 71 try: 72 retryer = Retrying( 73 stop=stop_after_attempt(self.max_retries), 74 wait=wait_exponential_jitter(max=10), 75 reraise=True, 76 retry=retry_if_exception_type(self.retry_error_types), 77 ) ---> 78 for attempt in retryer: 79 with attempt: 80 return self._generate( 81 messages=messages, 82 streaming=streaming, 83 callbacks=callbacks, 84 **kwargs, 85 ) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/tenacity/__init__.py:443, in BaseRetrying.__iter__(self) 441 retry_state = RetryCallState(self, fn=None, args=(), kwargs={}) 442 while True: --> 443 do = self.iter(retry_state=retry_state) 444 if isinstance(do, DoAttempt): 445 yield AttemptManager(retry_state=retry_state) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/tenacity/__init__.py:376, in BaseRetrying.iter(self, retry_state) 374 result = None 375 for action in self.iter_state.actions: --> 376 result = action(retry_state) 377 return result File 
~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/tenacity/__init__.py:418, in BaseRetrying._post_stop_check_actions.<locals>.exc_check(rs) 416 retry_exc = self.retry_error_cls(fut) 417 if self.reraise: --> 418 raise retry_exc.reraise() 419 raise retry_exc from fut.exception() File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/tenacity/__init__.py:185, in RetryError.reraise(self) 183 def reraise(self) -> t.NoReturn: 184 if self.last_attempt.failed: --> 185 raise self.last_attempt.result() 186 raise self File /opt/hostedtoolcache/Python/3.11.11/x64/lib/python3.11/concurrent/futures/_base.py:449, in Future.result(self, timeout) 447 raise CancelledError() 448 elif self._state == FINISHED: --> 449 return self.__get_result() 451 self._condition.wait(timeout) 453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: File /opt/hostedtoolcache/Python/3.11.11/x64/lib/python3.11/concurrent/futures/_base.py:401, in Future.__get_result(self) 399 if self._exception: 400 try: --> 401 raise self._exception 402 finally: 403 # Break a reference cycle with the exception in self._exception 404 self = None File ~/work/graphrag/graphrag/graphrag/query/llm/oai/chat_openai.py:80, in ChatOpenAI.generate(self, messages, streaming, callbacks, **kwargs) 78 for attempt in retryer: 79 with attempt: ---> 80 return self._generate( 81 messages=messages, 82 streaming=streaming, 83 callbacks=callbacks, 84 **kwargs, 85 ) 86 except RetryError as e: 87 self._reporter.error( 88 message="Error at generate()", details={self.__class__.__name__: str(e)} 89 ) File ~/work/graphrag/graphrag/graphrag/query/llm/oai/chat_openai.py:196, in ChatOpenAI._generate(self, messages, streaming, callbacks, **kwargs) 194 if not model: 195 raise ValueError(_MODEL_REQUIRED_MSG) --> 196 response = self.sync_client.chat.completions.create( # type: ignore 197 model=model, 198 messages=messages, # type: ignore 199 stream=streaming, 200 **kwargs, 201 ) # 
type: ignore 202 if streaming: 203 full_response = "" File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_utils/_utils.py:279, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs) 277 msg = f"Missing required argument: {quote(missing[0])}" 278 raise TypeError(msg) --> 279 return func(*args, **kwargs) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/resources/chat/completions.py:859, in Completions.create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, reasoning_effort, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout) 817 @required_args(["messages", "model"], ["messages", "model", "stream"]) 818 def create( 819 self, (...) 
856 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, 857 ) -> ChatCompletion | Stream[ChatCompletionChunk]: 858 validate_response_format(response_format) --> 859 return self._post( 860 "/chat/completions", 861 body=maybe_transform( 862 { 863 "messages": messages, 864 "model": model, 865 "audio": audio, 866 "frequency_penalty": frequency_penalty, 867 "function_call": function_call, 868 "functions": functions, 869 "logit_bias": logit_bias, 870 "logprobs": logprobs, 871 "max_completion_tokens": max_completion_tokens, 872 "max_tokens": max_tokens, 873 "metadata": metadata, 874 "modalities": modalities, 875 "n": n, 876 "parallel_tool_calls": parallel_tool_calls, 877 "prediction": prediction, 878 "presence_penalty": presence_penalty, 879 "reasoning_effort": reasoning_effort, 880 "response_format": response_format, 881 "seed": seed, 882 "service_tier": service_tier, 883 "stop": stop, 884 "store": store, 885 "stream": stream, 886 "stream_options": stream_options, 887 "temperature": temperature, 888 "tool_choice": tool_choice, 889 "tools": tools, 890 "top_logprobs": top_logprobs, 891 "top_p": top_p, 892 "user": user, 893 }, 894 completion_create_params.CompletionCreateParams, 895 ), 896 options=make_request_options( 897 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout 898 ), 899 cast_to=ChatCompletion, 900 stream=stream or False, 901 stream_cls=Stream[ChatCompletionChunk], 902 ) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1283, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls) 1269 def post( 1270 self, 1271 path: str, (...) 
1278 stream_cls: type[_StreamT] | None = None, 1279 ) -> ResponseT | _StreamT: 1280 opts = FinalRequestOptions.construct( 1281 method="post", url=path, json_data=body, files=to_httpx_files(files), **options 1282 ) -> 1283 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:960, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls) 957 else: 958 retries_taken = 0 --> 960 return self._request( 961 cast_to=cast_to, 962 options=options, 963 stream=stream, 964 stream_cls=stream_cls, 965 retries_taken=retries_taken, 966 ) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1049, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls) 1047 if remaining_retries > 0 and self._should_retry(err.response): 1048 err.response.close() -> 1049 return self._retry_request( 1050 input_options, 1051 cast_to, 1052 retries_taken=retries_taken, 1053 response_headers=err.response.headers, 1054 stream=stream, 1055 stream_cls=stream_cls, 1056 ) 1058 # If the response is streamed then we need to explicitly read the response 1059 # to completion before attempting to access the response text. 1060 if not err.response.is_closed: File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1098, in SyncAPIClient._retry_request(self, options, cast_to, retries_taken, response_headers, stream, stream_cls) 1094 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a 1095 # different thread if necessary. 
1096 time.sleep(timeout) -> 1098 return self._request( 1099 options=options, 1100 cast_to=cast_to, 1101 retries_taken=retries_taken + 1, 1102 stream=stream, 1103 stream_cls=stream_cls, 1104 ) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1049, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls) 1047 if remaining_retries > 0 and self._should_retry(err.response): 1048 err.response.close() -> 1049 return self._retry_request( 1050 input_options, 1051 cast_to, 1052 retries_taken=retries_taken, 1053 response_headers=err.response.headers, 1054 stream=stream, 1055 stream_cls=stream_cls, 1056 ) 1058 # If the response is streamed then we need to explicitly read the response 1059 # to completion before attempting to access the response text. 1060 if not err.response.is_closed: File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1098, in SyncAPIClient._retry_request(self, options, cast_to, retries_taken, response_headers, stream, stream_cls) 1094 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a 1095 # different thread if necessary. 1096 time.sleep(timeout) -> 1098 return self._request( 1099 options=options, 1100 cast_to=cast_to, 1101 retries_taken=retries_taken + 1, 1102 stream=stream, 1103 stream_cls=stream_cls, 1104 ) [... 
skipping similar frames: SyncAPIClient._request at line 1049 (17 times), SyncAPIClient._retry_request at line 1098 (17 times)] File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1049, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls) 1047 if remaining_retries > 0 and self._should_retry(err.response): 1048 err.response.close() -> 1049 return self._retry_request( 1050 input_options, 1051 cast_to, 1052 retries_taken=retries_taken, 1053 response_headers=err.response.headers, 1054 stream=stream, 1055 stream_cls=stream_cls, 1056 ) 1058 # If the response is streamed then we need to explicitly read the response 1059 # to completion before attempting to access the response text. 1060 if not err.response.is_closed: File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1098, in SyncAPIClient._retry_request(self, options, cast_to, retries_taken, response_headers, stream, stream_cls) 1094 # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a 1095 # different thread if necessary. 1096 time.sleep(timeout) -> 1098 return self._request( 1099 options=options, 1100 cast_to=cast_to, 1101 retries_taken=retries_taken + 1, 1102 stream=stream, 1103 stream_cls=stream_cls, 1104 ) File ~/.cache/pypoetry/virtualenvs/graphrag-F2jvqev7-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1064, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls) 1061 err.response.read() 1063 log.debug("Re-raising status error") -> 1064 raise self._make_status_error_from_response(err.response) from None 1066 return self._process_response( 1067 cast_to=cast_to, 1068 options=options, (...) 1072 retries_taken=retries_taken, 1073 ) RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. 
For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}
In [7]:
Copied!
# Display the search result. Requires the `asearch` cell above to have
# succeeded — in the saved run it failed, hence the NameError below.
resp.response
resp.response
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In[7], line 1 ----> 1 resp.response NameError: name 'resp' is not defined
In [8]:
Copied!
# Inspect the answer attached to the first node of the DRIFT result graph.
# Also depends on a successful `asearch` run (see NameError in saved output).
resp.response["nodes"][0]["answer"]
resp.response["nodes"][0]["answer"]
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In[8], line 1 ----> 1 resp.response["nodes"][0]["answer"] NameError: name 'resp' is not defined