import os
from dotenv import load_dotenv
load_dotenv()
ATLAS_CONNECTION_STRING = os.getenv("ATLAS_CONNECTION_STRING")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")

def mongodb_connection(database, collection):
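    """Connect to MongoDB Atlas and confirm the connection with a ping.

    The connection string is read from ATLAS_CONNECTION_STRING. Returns a
    (uri, client, database, collection) tuple on success, or None if the
    ping fails.
    """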

    from pymongo.mongo_client import MongoClient
    from pymongo.server_api import ServerApi
    uri = "mongodb+srv://remax_portal:yFjdhkBnxhJjEArq@cluster0.rfbuoht.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0"
    # Create a new client and connect to the server
    client = MongoClient(uri, server_api=ServerApi('1'))
    # Send a ping to confirm a successful connection
    try:
        client.admin.command('ping')
        print("Pinged your deployment. You successfully connected to MongoDB!")
        return uri, client, database, collection
    except Exception as e:
        print(e)
        return None



def load_data_from_mongodb(database, collection, field_names):
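    """Load documents from a MongoDB collection via LangChain's MongodbLoader.

    Only the fields listed in field_names are pulled into each Document's
    page_content.
    """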
    from langchain_community.document_loaders.mongodb import MongodbLoader
    loader = MongodbLoader(
        connection_string=ATLAS_CONNECTION_STRING,
        db_name=database,
        collection_name=collection,
        field_names=field_names
    )
    docs = loader.load()
    # TODO: enrich each Document in docs with additional metadata, e.g.:
    # for doc in docs:
    #     print(doc.metadata)
    return docs


def embedding_pinecone_from_mongodb(pinecone_index, database, collection, namespace, field_names):
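    """Re-embed a MongoDB collection into a Pinecone index.

    Clears the target namespace first, then embeds the loaded documents with
    OpenAI embeddings and upserts them via PineconeVectorStore. Returns the
    vector store, or None if creation fails.
    """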
    
    from langchain_openai import OpenAIEmbeddings
    embeddings = OpenAIEmbeddings()
    
    docs = load_data_from_mongodb(database, collection, field_names)

    from langchain_pinecone import PineconeVectorStore
    
    try:
        from pinecone import Pinecone
        pc = Pinecone(api_key=PINECONE_API_KEY)
        index = pc.Index(pinecone_index)
        # Clear any previous vectors in this namespace before re-embedding
        index.delete(delete_all=True, namespace=namespace)
    except Exception as e:
        print(f"Could not delete previous Pinecone Vector Store => {e}")

    try:
        vectorstore = PineconeVectorStore.from_documents(docs, embeddings, index_name=pinecone_index, namespace=namespace)
    except Exception as e:
        print(f"Could not create Pinecone Vector Store => {e}")
        return None
    return vectorstore


def embedding_chroma_from_mongodb(database, collection, field_names):
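    """Embed a MongoDB collection into a persistent local Chroma store and
    run a sample similarity search against it."""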
    from langchain_community.document_loaders.mongodb import MongodbLoader
    from langchain_openai import OpenAIEmbeddings
    from langchain_chroma import Chroma
    # Credentials belong in the environment, not in source code; the env var
    # name below is illustrative.
    loader = MongodbLoader(
        connection_string=os.getenv("LOCAL_MONGODB_CONNECTION_STRING"),
        db_name=database,
        collection_name=collection,
        field_names=field_names
    )
    docs = loader.load()
    
    
    embedding_function = OpenAIEmbeddings()
    db = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db", collection_name="v1_properties_nested")
    # Query it ("Procuro uma casa no Rio de Janeiro" = "I'm looking for a house in Rio de Janeiro")
    query = "Procuro uma casa no Rio de Janeiro"
    results = db.similarity_search(query)

    # Print the top result
    print(results[0].page_content)


def retrieve_from_pinecone(pinecone_index, namespace):
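    """Return a PineconeVectorStore handle for an existing index/namespace,
    using OpenAI embeddings to encode queries."""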
    from langchain_pinecone import PineconeVectorStore 
    from langchain_openai import OpenAIEmbeddings    
    embeddings = OpenAIEmbeddings()
    vector_store = PineconeVectorStore(index_name=pinecone_index, namespace=namespace, embedding=embeddings)
    return vector_store


def semantic_search(pinecone_index, namespace, query, k=5):
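    """Answer a query with RAG: retrieve the top-k property documents from
    Pinecone (MMR), format them into the Portuguese prompt below, and ask
    the chat model."""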
    from langchain_core.prompts import PromptTemplate
    from langchain_openai import ChatOpenAI
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.runnables import RunnableParallel, RunnablePassthrough

    vector_db = retrieve_from_pinecone(pinecone_index, namespace)
    retriever = vector_db.as_retriever(search_type="mmr")
    
    template = """Você é um corretor de imóveis virtual da RE/MAX e está trabalhando no portal de imóveis da empresa.
                    Responda à pergunta ou solicitação com base no contexto que será fornecido, que será informações sobre imóveis. 
                    Sempre informe o número da referência, que pode ser encontrada em 'property_id' do contexto.
                    
                    Contexto: {context}
                    
                    Pergunta: {question}
                    
                    Importante: quando não souber, não invente uma resposta, apenas responda que não é possível responder no momento. 
                    Sempre mencione o contexto fornecido. 
                    """

    
    prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=template,
    )
    
    
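    # Build the prompt inputs in parallel: the retriever fills {context},
    # and the raw question passes through to {question}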
    setup_and_retrieval = RunnableParallel(
        {"context": retriever, "question": RunnablePassthrough()}
    )
               

    model = ChatOpenAI(temperature=0, verbose=True)

    # Parse the chat model's output down to a plain string
    chain = setup_and_retrieval | prompt | model | StrOutputParser()
    
    question = "Casa Rio de Janeiro"
    
    context = retriever.invoke(question)
    
    # similarity = vector_db.similarity_search(query)
    # print("Similarity")
    # print(similarity)
    
    print("Prompt")
    print(prompt.format(context=context, question=question))
    
    response = chain.invoke(question)
    
    print(response)
    
    

    
    # --- Alternative approaches, kept commented for reference ---

    # 1) Contextual compression: works better for longer texts.
    # from langchain.retrievers.document_compressors import LLMChainExtractor
    # from langchain.retrievers import ContextualCompressionRetriever
    # from langchain_openai import OpenAI
    # llm = OpenAI(temperature=0, verbose=True)
    # compressor = LLMChainExtractor.from_llm(llm)
    # compression_retriever = ContextualCompressionRetriever(
    #     base_compressor=compressor, base_retriever=retriever)
    # Example query ("Find a house with a pool and barbecue in Rio de Janeiro"):
    # context = compression_retriever.invoke(
    #     "Encontre uma casa com Piscina e Churrasqueira no Rio de Janeiro")

    # 2) Plain similarity search instead of the MMR retriever.
    # context = vector_db.similarity_search(query)
    # print(context)

    # 3) RetrievalQA chain ("stuff" the retrieved documents into one prompt).
    # from langchain.chains import RetrievalQA
    # llm = ChatOpenAI(temperature=0, verbose=True)
    # qa = RetrievalQA.from_chain_type(
    #     llm=llm,
    #     chain_type="stuff",
    #     retriever=retriever,
    #     verbose=True,
    # )
    # output = qa.invoke(query + " Sempre informe o número da referência, que é o 'property_id' do contexto.")
    # print(output['result'])
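

# Minimal usage sketch. The index, database, collection, namespace, and field
# names below are illustrative placeholders, not values taken from this repo;
# adjust them to your own deployment before running.
if __name__ == "__main__":
    PINECONE_INDEX = "properties"   # hypothetical Pinecone index name
    DATABASE = "remax"              # hypothetical database name
    COLLECTION = "properties"       # hypothetical collection name
    NAMESPACE = "v1"                # hypothetical namespace
    FIELD_NAMES = ["property_id", "title", "description"]  # hypothetical fields

    # Embed the collection into Pinecone, then ask a question over it
    embedding_pinecone_from_mongodb(PINECONE_INDEX, DATABASE, COLLECTION, NAMESPACE, FIELD_NAMES)
    semantic_search(PINECONE_INDEX, NAMESPACE, "Procuro uma casa no Rio de Janeiro")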