cli_demo.py

from configs.model_config import *
from chains.local_doc_qa import LocalDocQA

# return top-k text chunks from the vector store
VECTOR_SEARCH_TOP_K = 10

# LLM input history length
LLM_HISTORY_LEN = 3

# show the reply together with source text from the input document
REPLY_WITH_SOURCE = True

if __name__ == "__main__":
    local_doc_qa = LocalDocQA()
    local_doc_qa.init_cfg(llm_model=LLM_MODEL,
                          embedding_model=EMBEDDING_MODEL,
                          embedding_device=EMBEDDING_DEVICE,
                          llm_history_len=LLM_HISTORY_LEN,
                          top_k=VECTOR_SEARCH_TOP_K)

    # keep asking for a knowledge file path until a vector store is built successfully
    vs_path = None
    while not vs_path:
        filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
        vs_path = local_doc_qa.init_knowledge_vector_store(filepath)

    # interactive Q&A loop; chat history is carried across turns
    history = []
    while True:
        query = input("Input your question 请输入问题:")
        resp, history = local_doc_qa.get_knowledge_based_answer(query=query,
                                                                vs_path=vs_path,
                                                                chat_history=history)
        if REPLY_WITH_SOURCE:
            print(resp)
        else:
            print(resp["result"])