From c1d69536c0c2b9dc61525a00bd720a77dcfce67f Mon Sep 17 00:00:00 2001
From: Tochi-Nwachukwu
Date: Fri, 24 Oct 2025 05:17:10 +0100
Subject: [PATCH] Submission for Week 5 Exercise

---
 .../tochi/whatsapp_chat_rag.ipynb | 236 ++++++++++++++++++
 1 file changed, 236 insertions(+)
 create mode 100644 week5/community-contributions/tochi/whatsapp_chat_rag.ipynb

diff --git a/week5/community-contributions/tochi/whatsapp_chat_rag.ipynb b/week5/community-contributions/tochi/whatsapp_chat_rag.ipynb
new file mode 100644
index 0000000..51c97d1
--- /dev/null
+++ b/week5/community-contributions/tochi/whatsapp_chat_rag.ipynb
@@ -0,0 +1,236 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "300ea30a",
+   "metadata": {},
+   "source": [
+    "# Expert Knowledge Worker\n",
+    "### This project is a question-answering agent built on WhatsApp chat messages exported from a group chat"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4bc17177",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "\n",
+    "import os\n",
+    "import glob\n",
+    "from dotenv import load_dotenv\n",
+    "import gradio as gr\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "400ac859",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports from LangChain\n",
+    "\n",
+    "from langchain_community.document_loaders import DirectoryLoader, TextLoader\n",
+    "from langchain.text_splitter import CharacterTextSplitter\n",
+    "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
+    "from langchain_chroma import Chroma\n",
+    "from langchain.memory import ConversationBufferMemory\n",
+    "from langchain.chains import ConversationalRetrievalChain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22199256",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# the low-cost model to use, and the directory name for the vector database\n",
+    "\n",
+    "MODEL = \"gpt-5-nano\"\n",
+    "db_name = \"vector_db\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9f6be1f4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load the OpenAI API key from a .env file\n",
+    "load_dotenv(override=True)\n",
+    "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b62754d4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Read in documents using LangChain's loaders\n",
+    "# Take only the .txt files at the top level of the knowledge-base folder (not subfolders)\n",
+    "\n",
+    "files = glob.glob(\"knowledge-base/*.txt\")\n",
+    "print(files)\n",
+    "\n",
+    "def add_metadata(doc, doc_type):\n",
+    "    doc.metadata[\"doc_type\"] = doc_type\n",
+    "    return doc\n",
+    "\n",
+    "text_loader_kwargs = {'encoding': 'utf-8'}\n",
+    "\n",
+    "# Load all .txt files from the knowledge-base folder\n",
+    "doc_type = \"knowledge-base\"\n",
+    "loader = DirectoryLoader(\n",
+    "    \"knowledge-base\",\n",
+    "    glob=\"*.txt\",  # only .txt files in the root folder, not subfolders\n",
+    "    loader_cls=TextLoader,\n",
+    "    loader_kwargs=text_loader_kwargs\n",
+    ")\n",
+    "documents = loader.load()\n",
+    "\n",
+    "# Tag every document with its doc_type in the metadata\n",
+    "documents = [add_metadata(doc, doc_type) for doc in documents]\n",
+    "\n",
+    "# Split the chats into overlapping chunks so each embedding keeps local context\n",
+    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
+    "chunks = text_splitter.split_documents(documents)\n",
+    "\n",
+    "print(f\"Total number of documents: {len(documents)}\")\n",
+    "print(f\"Total number of chunks: {len(chunks)}\")"
+   ]
+  },
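+  {
+   "cell_type": "markdown",
+   "id": "chunk-peek-md",
+   "metadata": {},
+   "source": [
+    "Optional sanity check before embedding: peek at one chunk and its metadata to confirm the WhatsApp export loaded and split as expected. A minimal sketch, assuming at least one `.txt` export is present in `knowledge-base/`; the cell ids here are placeholders."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "chunk-peek",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sanity check (assumes knowledge-base/ contains at least one exported chat)\n",
+    "if chunks:\n",
+    "    sample = chunks[0]\n",
+    "    print(sample.metadata)\n",
+    "    print(sample.page_content[:300])\n",
+    "else:\n",
+    "    print(\"No chunks found - check that the WhatsApp .txt exports are in knowledge-base/\")"
+   ]
+  },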
"embeddings = OpenAIEmbeddings()\n", + "if os.path.exists(db_name):\n", + " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", + "\n", + "\n", + "vectorstore = Chroma.from_documents(\n", + " documents=chunks, embedding=embeddings, persist_directory=db_name\n", + ")\n", + "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5426899a", + "metadata": {}, + "outputs": [], + "source": [ + "# create a new Chat with OpenAI\n", + "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", + "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", + "retriever = vectorstore.as_retriever()\n", + "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87e0e7c0", + "metadata": {}, + "outputs": [], + "source": [ + "query = \"Who is mentioned a lot?\"\n", + "result = conversation_chain.invoke({\"question\": query})\n", + "print(result[\"answer\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f36bac2f", + "metadata": {}, + "outputs": [], + "source": [ + "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", + "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e087213", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(question, history):\n", + " result = conversation_chain.invoke({\"question\": question})\n", + " return result[\"answer\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1fd9b2d", + "metadata": {}, + "outputs": [], + "source": [ + "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}