Multi-Step Query Engine
We have a multi-step query engine that's able to decompose a complex query into sequential sub-questions, answering each one in turn and feeding earlier answers into later steps. For example, "Who was in the first batch of the accelerator program the author started?" can be broken into "What accelerator program did the author start?" followed by "Who was in its first batch?". This guide walks you through how to set it up!
Load documents, build the GPTVectorStoreIndex
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index import (
    GPTVectorStoreIndex,
    SimpleDirectoryReader,
    LLMPredictor,
    ServiceContext,
)
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from IPython.display import Markdown, display
# LLM Predictor (gpt-3)
llm_predictor_gpt3 = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
service_context_gpt3 = ServiceContext.from_defaults(llm_predictor=llm_predictor_gpt3)
# LLM Predictor (gpt-4)
llm_predictor_gpt4 = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
service_context_gpt4 = ServiceContext.from_defaults(llm_predictor=llm_predictor_gpt4)
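Both LLM wrappers authenticate via the OPENAI_API_KEY environment variable, so set it before constructing them. A minimal sketch (the literal key below is a placeholder; in practice load it from a secret store rather than hard-coding it):
import os
# the langchain OpenAI / ChatOpenAI clients read this variable
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder key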
# load documents
documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
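By default the index is built with the default service context. As an optional, hedged variant (assuming your installed llama_index version accepts a service_context keyword on from_documents, as 0.6-era releases do), you can pin construction to one of the contexts defined above:
# optional: build the index under an explicit service context
# (assumption: from_documents accepts service_context in your version)
index = GPTVectorStoreIndex.from_documents(
    documents, service_context=service_context_gpt4
)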
Query Index
from llama_index.indices.query.query_transform.base import StepDecomposeQueryTransform
# gpt-4
step_decompose_transform = StepDecomposeQueryTransform(
    llm_predictor_gpt4, verbose=True
)
# gpt-3
step_decompose_transform_gpt3 = StepDecomposeQueryTransform(
    llm_predictor_gpt3, verbose=True
)
index_summary = "Used to answer questions about the author"
# set logging to DEBUG for more detailed outputs
from llama_index.query_engine.multistep_query_engine import MultiStepQueryEngine
query_engine = index.as_query_engine(
    service_context=service_context_gpt4
)
query_engine = MultiStepQueryEngine(
    query_engine=query_engine,
    query_transform=step_decompose_transform,
    index_summary=index_summary,
)
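MultiStepQueryEngine caps how many sequential sub-questions it will ask. As a hedged sketch (the num_steps and early_stopping keyword arguments are assumed from recent llama_index releases; verify against your installed version's signature), you can tune that bound:
# optional: cap the number of sequential sub-questions
# (assumption: num_steps / early_stopping kwargs exist in your version)
query_engine = MultiStepQueryEngine(
    query_engine=query_engine,
    query_transform=step_decompose_transform,
    index_summary=index_summary,
    num_steps=3,          # ask at most 3 sub-questions
    early_stopping=True,  # halt early once the engine decides it is done
)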
response_gpt4 = query_engine.query(
    "Who was in the first batch of the accelerator program the author started?",
)
display(Markdown(f"<b>{response_gpt4}</b>"))
# inspect the intermediate sub-question/answer pairs
sub_qa = response_gpt4.extra_info["sub_qa"]
tuples = [(t[0], t[1].response) for t in sub_qa]
print(tuples)
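Each entry in sub_qa pairs a generated sub-question with the intermediate answer it produced; a small loop makes the reasoning chain easier to read:
# print the reasoning chain, one step per block
for question, answer in tuples:
    print(f"Q: {question}")
    print(f"A: {answer}\n")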
response_gpt4 = query_engine.query(
    "In which city did the author found his first company, Viaweb?",
)
print(response_gpt4)
# repeat the same pipeline with gpt-3 for comparison
query_engine = index.as_query_engine(
    service_context=service_context_gpt3
)
query_engine = MultiStepQueryEngine(
    query_engine=query_engine,
    query_transform=step_decompose_transform_gpt3,
    index_summary=index_summary,
)
response_gpt3 = query_engine.query(
    "In which city did the author found his first company, Viaweb?",
)
print(response_gpt3)
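The gpt-3 response exposes its intermediate steps the same way, which makes it easy to compare how the two models decompose the same query:
# inspect gpt-3's sub-question/answer pairs for comparison
sub_qa_gpt3 = response_gpt3.extra_info["sub_qa"]
print([(t[0], t[1].response) for t in sub_qa_gpt3])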