#!/usr/bin/env python

# chat.py - given a few configurations and a previously persisted llama_index
# store, provide an interactive question-and-answer loop against that index
#
# Eric Lease Morgan
# (c) University of Notre Dame; distributed under a GNU Public License
# January 2, 2024 - first cut, but really hacked upon for the past couple of weeks

# configure
STORAGE    = 'index'
SIMILARITY = 8
STREAMING  = True
MESSAGE    = 'Apparently, the LLM has yet to be created for this carrel. Call Eric or create one.'
PROMPT     = '\nAsk anything: '
CONTEXT    = 'In the given context, '
SALUTATION = '\nOkay, bye bye, and thank you.'

# require
from llama_index import StorageContext, load_index_from_storage
from pathlib import Path  # NOTE(review): imported but unused here; kept in case siblings rely on it

# try to load the previously persisted index; any failure (missing directory,
# corrupt store, version mismatch) means the carrel has no usable LLM yet.
# Catch Exception — not a bare except — so Ctrl-C during startup still works.
try:
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir=STORAGE))
except Exception:
    exit(MESSAGE)

# initialize a query engine that returns the top-k similar chunks and streams
# its response token-by-token
engine = index.as_query_engine(similarity_top_k=SIMILARITY, streaming=STREAMING)

# do the work, forever (sort of); Ctrl-C or Ctrl-D (EOF) ends the session
while True:

    # get input; EOFError covers Ctrl-D, KeyboardInterrupt covers Ctrl-C
    try:
        question = input(PROMPT)
    except (EOFError, KeyboardInterrupt):
        exit(SALUTATION)

    # search and respond; CONTEXT is prepended to steer the model toward
    # answering from the indexed content rather than general knowledge
    print()
    engine.query(CONTEXT + question).print_response_stream()
    print()

# done (unreachable; the loop only exits via the except clause above)
exit()