diff --git a/week5/community-contributions/Personal Knowledge Worker/Project_GPT.ipynb b/week5/community-contributions/Personal Knowledge Worker/Project_GPT.ipynb
new file mode 100644
index 0000000..4bafbb0
--- /dev/null
+++ b/week5/community-contributions/Personal Knowledge Worker/Project_GPT.ipynb
@@ -0,0 +1,388 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "dfe37963-1af6-44fc-a841-8e462443f5e6",
+ "metadata": {},
+ "source": [
+ "## Personal Knowledge Worker for Sameer Khadatkar\n",
+ "\n",
+ "This project will use RAG (Retrieval Augmented Generation) to ensure our question/answering assistant has high accuracy.\n",
+ "\n",
+ "This first implementation will use a simple, brute-force type of RAG.."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ba2779af-84ef-4227-9e9e-6eaf0df87e77",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "from dotenv import load_dotenv\n",
+ "import gradio as gr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "802137aa-8a74-45e0-a487-d1974927d7ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports for langchain, plotly and Chroma\n",
+ "\n",
+ "from langchain.document_loaders import DirectoryLoader, TextLoader\n",
+ "from langchain.text_splitter import CharacterTextSplitter\n",
+ "from langchain.schema import Document\n",
+ "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
+ "from langchain_chroma import Chroma\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.manifold import TSNE\n",
+ "import numpy as np\n",
+ "import plotly.graph_objects as go\n",
+ "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain.chains import ConversationalRetrievalChain\n",
+ "from langchain.embeddings import HuggingFaceEmbeddings"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "58c85082-e417-4708-9efe-81a5d55d1424",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# price is a factor, so we're going to use a low cost model\n",
+ "\n",
+ "MODEL = \"gpt-4o-mini\"\n",
+ "db_name = \"vector_db\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ee78efcb-60fe-449e-a944-40bab26261af",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "730711a9-6ffe-4eee-8f48-d6cfb7314905",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Read in documents using LangChain's loaders\n",
+ "# Take everything in all the sub-folders of our knowledgebase\n",
+ "\n",
+ "folders = glob.glob(\"sameer-db/*\")\n",
+ "\n",
+ "def add_metadata(doc, doc_type):\n",
+ " doc.metadata[\"doc_type\"] = doc_type\n",
+ " return doc\n",
+ "\n",
+ "text_loader_kwargs = {'encoding': 'utf-8'}\n",
+ "\n",
+ "documents = []\n",
+ "for folder in folders:\n",
+ " doc_type = os.path.basename(folder)\n",
+ " loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
+ " folder_docs = loader.load()\n",
+ " documents.extend([add_metadata(doc, doc_type) for doc in folder_docs])\n",
+ "\n",
+ "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
+ "chunks = text_splitter.split_documents(documents)\n",
+ "\n",
+ "print(f\"Total number of chunks: {len(chunks)}\")\n",
+ "print(f\"Document types found: {set(doc.metadata['doc_type'] for doc in documents)}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "78998399-ac17-4e28-b15f-0b5f51e6ee23",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n",
+ "# Chroma is a popular open source Vector Database based on SQLLite\n",
+ "\n",
+ "embeddings = OpenAIEmbeddings()\n",
+ "\n",
+ "if os.path.exists(db_name):\n",
+ " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n",
+ "\n",
+ "# Create vectorstore\n",
+ "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
+ "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ff2e7687-60d4-4920-a1d7-a34b9f70a250",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's investigate the vectors\n",
+ "\n",
+ "collection = vectorstore._collection\n",
+ "count = collection.count()\n",
+ "\n",
+ "sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n",
+ "dimensions = len(sample_embedding)\n",
+ "print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b0d45462-a818-441c-b010-b85b32bcf618",
+ "metadata": {},
+ "source": [
+ "## Visualizing the Vector Store\n",
+ "\n",
+ "Let's take a minute to look at the documents and their embedding vectors to see what's going on."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b98adf5e-d464-4bd2-9bdf-bc5b6770263b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
+ "vectors = np.array(result['embeddings'])\n",
+ "documents = result['documents']\n",
+ "metadatas = result['metadatas']\n",
+ "doc_types = [metadata['doc_type'] for metadata in metadatas]\n",
+ "colors = [['green', 'red'][['personal', 'profile'].index(t)] for t in doc_types]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "427149d5-e5d8-4abd-bb6f-7ef0333cca21",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# We humans find it easier to visalize things in 2D!\n",
+ "# Reduce the dimensionality of the vectors to 2D using t-SNE\n",
+ "# (t-distributed stochastic neighbor embedding)\n",
+ "\n",
+ "tsne = TSNE(n_components=2, random_state=42,perplexity=5)\n",
+ "reduced_vectors = tsne.fit_transform(vectors)\n",
+ "\n",
+ "# Create the 2D scatter plot\n",
+ "fig = go.Figure(data=[go.Scatter(\n",
+ " x=reduced_vectors[:, 0],\n",
+ " y=reduced_vectors[:, 1],\n",
+ " mode='markers',\n",
+ " marker=dict(size=5, color=colors, opacity=0.8),\n",
+ " text=[f\"Type: {t}
Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
+ " hoverinfo='text'\n",
+ ")])\n",
+ "\n",
+ "fig.update_layout(\n",
+ " title='2D Chroma Vector Store Visualization',\n",
+ " scene=dict(xaxis_title='x',yaxis_title='y'),\n",
+ " width=800,\n",
+ " height=600,\n",
+ " margin=dict(r=20, b=10, l=10, t=40)\n",
+ ")\n",
+ "\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e1418e88-acd5-460a-bf2b-4e6efc88e3dd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's try 3D!\n",
+ "\n",
+ "tsne = TSNE(n_components=3, random_state=42,perplexity=5)\n",
+ "reduced_vectors = tsne.fit_transform(vectors)\n",
+ "\n",
+ "# Create the 3D scatter plot\n",
+ "fig = go.Figure(data=[go.Scatter3d(\n",
+ " x=reduced_vectors[:, 0],\n",
+ " y=reduced_vectors[:, 1],\n",
+ " z=reduced_vectors[:, 2],\n",
+ " mode='markers',\n",
+ " marker=dict(size=5, color=colors, opacity=0.8),\n",
+ " text=[f\"Type: {t}
Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
+ " hoverinfo='text'\n",
+ ")])\n",
+ "\n",
+ "fig.update_layout(\n",
+ " title='3D Chroma Vector Store Visualization',\n",
+ " scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
+ " width=900,\n",
+ " height=700,\n",
+ " margin=dict(r=20, b=10, l=10, t=40)\n",
+ ")\n",
+ "\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9468860b-86a2-41df-af01-b2400cc985be",
+ "metadata": {},
+ "source": [
+ "## Time to use LangChain to bring it all together"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b3942a10-9977-4ae7-9acf-968c43ad0d4a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.schema import SystemMessage"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "45c0fb93-0a16-4e55-857b-1f9fd61ec24c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create a new Chat with OpenAI\n",
+ "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n",
+ "\n",
+ "# set up the conversation memory for the chat\n",
+ "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+ "memory.chat_memory.messages.insert(0, SystemMessage(\n",
+ " content=\"\"\"You are an AI Assistant specialized in providing accurate information about Sameer Khadatkar. Only respond when the question explicitly asks for information. \n",
+ " Keep your answers brief, factual, and based solely on the information provided. Do not speculate or fabricate details. \n",
+ " For example, if the user simply says \"hi,\" respond with: \"How can I help you?\"\n",
+ " \"\"\"\n",
+ "))\n",
+ "\n",
+ "# the retriever is an abstraction over the VectorStore that will be used during RAG\n",
+ "retriever = vectorstore.as_retriever(k=4)\n",
+ "\n",
+ "# putting it together: set up the conversation chain with the GPT 3.5 LLM, the vector store and memory\n",
+ "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "968e7bf2-e862-4679-a11f-6c1efb6ec8ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's try a simple question\n",
+ "\n",
+ "query = \"Who are you?\"\n",
+ "result = conversation_chain.invoke({\"question\": query})\n",
+ "print(result[\"answer\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5b5a9013-d5d4-4e25-9e7c-cdbb4f33e319",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# set up a new conversation memory for the chat\n",
+ "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+ "\n",
+ "# putting it together: set up the conversation chain with the GPT 4o-mini LLM, the vector store and memory\n",
+ "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbbcb659-13ce-47ab-8a5e-01b930494964",
+ "metadata": {},
+ "source": [
+ "## Now we will bring this up in Gradio using the Chat interface -\n",
+ "\n",
+ "A quick and easy way to prototype a chat with an LLM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c3536590-85c7-4155-bd87-ae78a1467670",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Wrapping that in a function\n",
+ "\n",
+ "def chat(question, history):\n",
+ " result = conversation_chain.invoke({\"question\": question})\n",
+ " return result[\"answer\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b252d8c1-61a8-406d-b57a-8f708a62b014",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# And in Gradio:\n",
+ "\n",
+ "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e23270cf-2d46-4f9e-aeb3-de1673900d2f",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3476931e-7d94-4b4d-8cc6-67a1bd5fa79c",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week5/community-contributions/Personal Knowledge Worker/Project_PHI.ipynb b/week5/community-contributions/Personal Knowledge Worker/Project_PHI.ipynb
new file mode 100644
index 0000000..b1ad1b8
--- /dev/null
+++ b/week5/community-contributions/Personal Knowledge Worker/Project_PHI.ipynb
@@ -0,0 +1,927 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "fOxyiqtzKqLg",
+ "outputId": "714d12c5-775e-42c8-b51c-979a9112b808"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -q datasets requests torch peft bitsandbytes transformers trl accelerate sentencepiece tiktoken matplotlib gradio modal ollama langchain langchain-core langchain-text-splitters langchain-openai langchain-chroma langchain-community faiss-cpu feedparser"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "zyxwwUw6LWXK"
+ },
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "from dotenv import load_dotenv\n",
+ "import gradio as gr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Zzqc9nk1L_5w",
+ "outputId": "0af5e1bb-2ccb-4838-b7a5-76c19285d094"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain.document_loaders import DirectoryLoader, TextLoader, UnstructuredPDFLoader\n",
+ "from langchain.text_splitter import CharacterTextSplitter\n",
+ "from langchain.schema import Document\n",
+ "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
+ "from langchain_chroma import Chroma\n",
+ "import matplotlib.pyplot as plt\n",
+ "from sklearn.manifold import TSNE\n",
+ "import numpy as np\n",
+ "import plotly.graph_objects as go\n",
+ "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain.chains import ConversationalRetrievalChain\n",
+ "from langchain.embeddings import HuggingFaceEmbeddings\n",
+ "from huggingface_hub import login\n",
+ "import torch\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments, set_seed\n",
+ "from google.colab import userdata\n",
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "u_vbe1itNZ2n"
+ },
+ "outputs": [],
+ "source": [
+ "base_path = \"/content/drive/MyDrive/sameer-db\"\n",
+ "folders = glob.glob(os.path.join(base_path, \"*\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "f0lJBMjhMrLO",
+ "outputId": "5cdc6327-3a3a-4d5b-ca05-4c1383c020e2"
+ },
+ "outputs": [],
+ "source": [
+ "def add_metadata(doc, doc_type):\n",
+ " doc.metadata[\"doc_type\"] = doc_type\n",
+ " return doc\n",
+ "\n",
+ "# With thanks to CG and Jon R, students on the course, for this fix needed for some users\n",
+ "text_loader_kwargs = {'encoding': 'utf-8'}\n",
+ "# If that doesn't work, some Windows users might need to uncomment the next line instead\n",
+ "# text_loader_kwargs={'autodetect_encoding': True}\n",
+ "\n",
+ "documents = []\n",
+ "for folder in folders:\n",
+ " doc_type = os.path.basename(folder)\n",
+ " loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
+ " folder_docs = loader.load()\n",
+ " documents.extend([add_metadata(doc, doc_type) for doc in folder_docs])\n",
+ "\n",
+ "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
+ "chunks = text_splitter.split_documents(documents)\n",
+ "\n",
+ "print(f\"Total number of chunks: {len(chunks)}\")\n",
+ "print(f\"Document types found: {set(doc.metadata['doc_type'] for doc in documents)}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "zSjwqZ3YNBLp"
+ },
+ "outputs": [],
+ "source": [
+ "hf_token = userdata.get('HF_TOKEN')\n",
+ "login(hf_token, add_to_git_credential=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "t7rraUyHNkdP"
+ },
+ "outputs": [],
+ "source": [
+ "Phi_4 = \"microsoft/Phi-4-mini-instruct\"\n",
+ "db_name = \"/content/drive/MyDrive/phi_vector_db\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "pDjj2S5ZPzF1"
+ },
+ "outputs": [],
+ "source": [
+ "quant_config = BitsAndBytesConfig(\n",
+ " load_in_4bit=True,\n",
+ " bnb_4bit_use_double_quant=True,\n",
+ " bnb_4bit_compute_dtype=torch.bfloat16,\n",
+ " bnb_4bit_quant_type=\"nf4\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 66,
+ "referenced_widgets": [
+ "2a0377fc1e0c4c08944be1857c4e2409",
+ "7c8335e0c3f8459d89f3b9815a896e39",
+ "0fcb91f0551a4871b747f82e5fa6ff38",
+ "fa5c6cf8395840e08e2743d6e88190be",
+ "8613224ada934e7ba57fd5184ea61044",
+ "1180c8fe49e94873a024d38d33649852",
+ "4395c417cc854fc48da18d0ddd62671e",
+ "d678106a6601478cb5712991604788f0",
+ "5c4a8d25dbc942d5a596c8fa8580a785",
+ "c1b076c063e04536831d68e5e48f1692",
+ "9bcee7f185434cd0b1a998448236548c"
+ ]
+ },
+ "id": "qzQzgir5VUBF",
+ "outputId": "1e7198a3-4857-49ab-f368-d430beddbf42"
+ },
+ "outputs": [],
+ "source": [
+ "tokenizer = AutoTokenizer.from_pretrained(Phi_4, trust_remote_code=True)\n",
+ "tokenizer.pad_token = tokenizer.eos_token\n",
+ "tokenizer.padding_side = \"right\"\n",
+ "\n",
+ "base_model = AutoModelForCausalLM.from_pretrained(\n",
+ " Phi_4,\n",
+ " quantization_config=quant_config,\n",
+ " device_map=\"auto\",\n",
+ ")\n",
+ "base_model.generation_config.pad_token_id = tokenizer.pad_token_id\n",
+ "\n",
+ "print(f\"Memory footprint: {base_model.get_memory_footprint() / 1e9:.1f} GB\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "MjK3mBKHQBra"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain.embeddings.base import Embeddings\n",
+ "from typing import List\n",
+ "import torch.nn.functional as F"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Q1BIMVW4Pf0A"
+ },
+ "outputs": [],
+ "source": [
+ "class PHI4Embeddings(Embeddings):\n",
+ " def __init__(self, tokenizer, model):\n",
+ " self.tokenizer = tokenizer\n",
+ " self.model = model\n",
+ " self.model.eval()\n",
+ "\n",
+ " def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
+ " embeddings = []\n",
+ " for text in texts:\n",
+ " with torch.no_grad():\n",
+ " inputs = self.tokenizer(text, return_tensors=\"pt\", truncation=True, max_length=512).to(self.model.device)\n",
+ " outputs = self.model(**inputs, output_hidden_states=True)\n",
+ " hidden_states = outputs.hidden_states[-1] # Last layer\n",
+ " attention_mask = inputs[\"attention_mask\"].unsqueeze(-1)\n",
+ " pooled = (hidden_states * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)\n",
+ " normalized = F.normalize(pooled, p=2, dim=1)\n",
+ " embeddings.append(normalized[0].cpu().tolist())\n",
+ " return embeddings\n",
+ "\n",
+ " def embed_query(self, text: str) -> List[float]:\n",
+ " return self.embed_documents([text])[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "7aUTue_mMxof"
+ },
+ "outputs": [],
+ "source": [
+ "# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n",
+ "\n",
+ "embeddings = PHI4Embeddings(tokenizer, base_model)\n",
+ "\n",
+ "# Delete if already exists\n",
+ "\n",
+ "if os.path.exists(db_name):\n",
+ " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "uWSe-8mATUag",
+ "outputId": "296804af-2283-435a-908c-48adaa6b4fd9"
+ },
+ "outputs": [],
+ "source": [
+ "# Create vectorstore\n",
+ "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
+ "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "1ZQ6agxtSLp5",
+ "outputId": "8e5bf8a7-fbaf-427b-9a67-369945aba80e"
+ },
+ "outputs": [],
+ "source": [
+ "# Let's investigate the vectors\n",
+ "\n",
+ "collection = vectorstore._collection\n",
+ "count = collection.count()\n",
+ "\n",
+ "sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n",
+ "dimensions = len(sample_embedding)\n",
+ "print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qBIOPr2YT5FM"
+ },
+ "outputs": [],
+ "source": [
+ "# Prework\n",
+ "result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
+ "vectors = np.array(result['embeddings'])\n",
+ "documents = result['documents']\n",
+ "metadatas = result['metadatas']\n",
+ "doc_types = [metadata['doc_type'] for metadata in metadatas]\n",
+ "colors = [['blue', 'red'][['personal', 'profile'].index(t)] for t in doc_types]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 617
+ },
+ "id": "fnuul36bUB3h",
+ "outputId": "f6cf1650-910a-4a03-f92d-9c200fb37de7"
+ },
+ "outputs": [],
+ "source": [
+ "# We humans find it easier to visalize things in 2D!\n",
+ "# Reduce the dimensionality of the vectors to 2D using t-SNE\n",
+ "# (t-distributed stochastic neighbor embedding)\n",
+ "\n",
+ "tsne = TSNE(n_components=2, random_state=42, perplexity=4)\n",
+ "reduced_vectors = tsne.fit_transform(vectors)\n",
+ "\n",
+ "# Create the 2D scatter plot\n",
+ "fig = go.Figure(data=[go.Scatter(\n",
+ " x=reduced_vectors[:, 0],\n",
+ " y=reduced_vectors[:, 1],\n",
+ " mode='markers',\n",
+ " marker=dict(size=5, color=colors, opacity=0.8),\n",
+ " text=[f\"Type: {t}
Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
+ " hoverinfo='text'\n",
+ ")])\n",
+ "\n",
+ "fig.update_layout(\n",
+ " title='2D Chroma Vector Store Visualization',\n",
+ " scene=dict(xaxis_title='x',yaxis_title='y'),\n",
+ " width=800,\n",
+ " height=600,\n",
+ " margin=dict(r=20, b=10, l=10, t=40)\n",
+ ")\n",
+ "\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 717
+ },
+ "id": "Dgaeb7aRUF5d",
+ "outputId": "47546459-e169-4d2b-d0d7-4ebd135556e0"
+ },
+ "outputs": [],
+ "source": [
+ "# Let's try 3D!\n",
+ "\n",
+ "tsne = TSNE(n_components=3, random_state=42, perplexity=4)\n",
+ "reduced_vectors = tsne.fit_transform(vectors)\n",
+ "\n",
+ "# Create the 3D scatter plot\n",
+ "fig = go.Figure(data=[go.Scatter3d(\n",
+ " x=reduced_vectors[:, 0],\n",
+ " y=reduced_vectors[:, 1],\n",
+ " z=reduced_vectors[:, 2],\n",
+ " mode='markers',\n",
+ " marker=dict(size=5, color=colors, opacity=0.8),\n",
+ " text=[f\"Type: {t}
Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
+ " hoverinfo='text'\n",
+ ")])\n",
+ "\n",
+ "fig.update_layout(\n",
+ " title='3D Chroma Vector Store Visualization',\n",
+ " scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
+ " width=900,\n",
+ " height=700,\n",
+ " margin=dict(r=20, b=10, l=10, t=40)\n",
+ ")\n",
+ "\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "BZcCyGI3YEwJ",
+ "outputId": "fd03e6ee-2ec1-4c6b-c14b-986255ca070c"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain.llms import HuggingFacePipeline\n",
+ "from transformers import pipeline\n",
+ "\n",
+ "pipe = pipeline(\n",
+ " \"text-generation\",\n",
+ " model=base_model,\n",
+ " tokenizer=tokenizer,\n",
+ " max_new_tokens=4069,\n",
+ " return_full_text=False,\n",
+ " temperature=0.7\n",
+ ")\n",
+ "\n",
+ "llm = HuggingFacePipeline(pipeline=pipe)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "WDY8-1gJUM1v"
+ },
+ "outputs": [],
+ "source": [
+ "# set up the conversation memory for the chat\n",
+ "from langchain.schema import SystemMessage\n",
+ "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+ "# memory.chat_memory.add_message(SystemMessage(content='''You are a helpful assistant that answers questions about Sameer Khadatkar **in English only**, based only on the retrieved documents.\n",
+ "# Do not respond in any other language.'''))\n",
+ "\n",
+ "# the retriever is an abstraction over the VectorStore that will be used during RAG\n",
+ "retriever = vectorstore.as_retriever(k=2)\n",
+ "\n",
+ "# putting it together: set up the conversation chain with the GPT 3.5 LLM, the vector store and memory\n",
+ "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "dkuv5wD6jCrX"
+ },
+ "outputs": [],
+ "source": [
+ "def extract_first_helpful_answer(output: str) -> str:\n",
+ " if \"Helpful Answer:\" in output:\n",
+ " parts = output.split(\"Helpful Answer:\")\n",
+ " return parts[0].strip().split(\"\\n\")[0].strip() # Take only the first line after it\n",
+ " return output.strip()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ZY5BH4C3UY1E"
+ },
+ "outputs": [],
+ "source": [
+ "query = \"Who is Sameer\"\n",
+ "result = conversation_chain.invoke({\"question\": query})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "7n5PcQw0iRjO",
+ "outputId": "794c4dad-efde-4220-a9bd-50a1ae156229"
+ },
+ "outputs": [],
+ "source": [
+ "print(extract_first_helpful_answer(result[\"answer\"]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "vW025q5Tkwc3",
+ "outputId": "e57d34e5-a64c-4e0b-e29b-d887214331c4"
+ },
+ "outputs": [],
+ "source": [
+ "result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "JIev764VkCht"
+ },
+ "outputs": [],
+ "source": [
+ "# set up a new conversation memory for the chat\n",
+ "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+ "\n",
+ "# putting it together: set up the conversation chain with the GPT 4o-mini LLM, the vector store and memory\n",
+ "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "OO9o_VBholCx"
+ },
+ "outputs": [],
+ "source": [
+ "# Wrapping that in a function\n",
+ "\n",
+ "def chat(question, history):\n",
+ " result = conversation_chain.invoke({\"question\": question})\n",
+ " return extract_first_helpful_answer(result[\"answer\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 646
+ },
+ "id": "zOqiuWqCo04a",
+ "outputId": "fcb89961-1687-4d54-fcdd-ca5c590d69de"
+ },
+ "outputs": [],
+ "source": [
+ "# And in Gradio:\n",
+ "\n",
+ "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qIYSDiQUo5WX"
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "0fcb91f0551a4871b747f82e5fa6ff38": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_d678106a6601478cb5712991604788f0",
+ "max": 2,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_5c4a8d25dbc942d5a596c8fa8580a785",
+ "value": 2
+ }
+ },
+ "1180c8fe49e94873a024d38d33649852": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2a0377fc1e0c4c08944be1857c4e2409": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_7c8335e0c3f8459d89f3b9815a896e39",
+ "IPY_MODEL_0fcb91f0551a4871b747f82e5fa6ff38",
+ "IPY_MODEL_fa5c6cf8395840e08e2743d6e88190be"
+ ],
+ "layout": "IPY_MODEL_8613224ada934e7ba57fd5184ea61044"
+ }
+ },
+ "4395c417cc854fc48da18d0ddd62671e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "5c4a8d25dbc942d5a596c8fa8580a785": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "7c8335e0c3f8459d89f3b9815a896e39": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_1180c8fe49e94873a024d38d33649852",
+ "placeholder": "",
+ "style": "IPY_MODEL_4395c417cc854fc48da18d0ddd62671e",
+ "value": "Loading checkpoint shards: 100%"
+ }
+ },
+ "8613224ada934e7ba57fd5184ea61044": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "9bcee7f185434cd0b1a998448236548c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "c1b076c063e04536831d68e5e48f1692": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d678106a6601478cb5712991604788f0": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "fa5c6cf8395840e08e2743d6e88190be": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c1b076c063e04536831d68e5e48f1692",
+ "placeholder": "",
+ "style": "IPY_MODEL_9bcee7f185434cd0b1a998448236548c",
+ "value": " 2/2 [00:41<00:00, 19.69s/it]"
+ }
+ }
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/week5/community-contributions/Personal Knowledge Worker/sameer-db/personal/sameer.md b/week5/community-contributions/Personal Knowledge Worker/sameer-db/personal/sameer.md
new file mode 100644
index 0000000..c585424
--- /dev/null
+++ b/week5/community-contributions/Personal Knowledge Worker/sameer-db/personal/sameer.md
@@ -0,0 +1,23 @@
+# Sameer Khadatkar
+
+Hi, I am **Sameer Khadatkar**, born and brought up in **Nagpur**.
+
+I completed my schooling at **Dinanath Junior College and High School, Nagpur** up to the 12th standard. After that, I moved to **Amravati** for my Bachelor's degree.
+
+### Academic Journey
+I prepared for the **GATE Mechanical Engineering (ME)** exam:
+- **2020**: Rank **377**
+
+With this rank, I secured admission to the prestigious **Indian Institute of Science (IISc), Bangalore**.
+
+### Career
+I later got placed at **Wells Fargo**, Hyderabad.
+
+### Personal Life
+- I got married to my batchmate from Government College of Engineering Amravati.
+
+### Hobbies & Interests
+I played **Cycle Polo** up to my 8th standard and even competed at the **national level**.
+
+### Family
+- Parents, elder sister and wife.
diff --git a/week5/community-contributions/Personal Knowledge Worker/sameer-db/profile/Profile.md b/week5/community-contributions/Personal Knowledge Worker/sameer-db/profile/Profile.md
new file mode 100644
index 0000000..d9853cd
--- /dev/null
+++ b/week5/community-contributions/Personal Knowledge Worker/sameer-db/profile/Profile.md
@@ -0,0 +1,145 @@
+# Sameer Raju Khadatkar
+
+**Quant AI/ML @ Wells Fargo | M.Tech. (CDS) @ IISc, Bangalore | B.Tech. (Mechanical) @ GCOE, Amravati**
+📍 Hyderabad, Telangana, India
+📧 sameer123khadatkar@gmail.com
+🔗 [LinkedIn](https://www.linkedin.com/in/sameer-khadatkar/)
+
+---
+
+## Summary
+
+I currently serve as a Quantitative Analytics Specialist within Wells Fargo's Model Risk Management (MRM) team in India and the Philippines. My primary responsibility involves validating AI/ML models, with a focus on fraud detection, as well as models used in marketing, credit scoring, and natural language processing (NLP). In this role, I ensure the conceptual soundness of models, conduct performance testing and explainability analysis, and rigorously challenge models by developing challenger models to detect weaknesses.
+
+Additionally, I ensure compliance with regulatory standards set by Wells Fargo, in alignment with guidelines from the Federal Reserve and the OCC. I work closely with model development and risk management teams, providing validation feedback and recommending improvements. I also contribute to documentation and reporting, preparing validation reports, and ensuring the ongoing monitoring of model performance.
+
+With a strong foundation in Machine Learning, Deep Learning, and High-Performance Computing gained during my graduate studies at the Indian Institute of Science, Bangalore, and a Bachelor's degree in Mechanical Engineering, I bring a unique blend of skills at the intersection of advanced technology and engineering. My expertise allows me to tackle complex challenges, drive innovation, and contribute to cutting-edge solutions in diverse industries.
+
+---
+
+## Professional Experience
+
+### Wells Fargo International Solutions Private Ltd
+**Quantitative Analytics Specialist – AVP**
+📍 Hyderabad, Telangana, India
+📅 August 2022 – September 2023
+
+- Collaborating with a team overseeing an inventory of ∼300 models focused on Fraud Detection, primarily utilizing Logistic Regression, Extreme Gradient Boosting (XGBoost), and Neural Network models.
+- Conducting validation of AI/ML models by ensuring conceptual soundness, performing performance testing, carrying out explainability analysis, and developing surrogate, challenger, and offset models to uncover potential weaknesses.
+- Joined the team during its expansion in India, playing a key role in building trust with US stakeholders. Recognized with the **Manager’s Spotlight Award** for outstanding dedication and contributions.
+- Developing a module to assist Validators in benchmarking anomaly detection models (Isolation Forest, Extended Isolation Forest, Autoencoders, Histogram-Based Outlier Score (HBOS), etc.) and assessing them using clustering performance metrics.
+- Created a validation playbook for fraud detection vendor models and developed an Excel-based policy library to facilitate quick reference for team members.
+
+---
+
+## Highlighted Projects at Wells Fargo
+
+### ✅ Check Authorization Model | Validation
+
+- Validated a high-impact machine learning model for check authorization, ensuring compliance with regulatory and bank's MRM standards.
+- Reviewed model objectives, assumptions, architecture, and data pipeline.
+- Assessed performance using AUC, recall, KS statistic, and PSI across time.
+- Performed explainability analysis using multicollinearity checks, surrogate models (overall and segment level), SHAP, PDP, H-Statistic, 2D-PDPs, and sensitivity analysis.
+- Identified local weaknesses through segmentation and built offset models to detect missed signals.
+- Developed challenger models using YOLOv5, SigNet, TrOCR (Transformer-based OCR), XGBoost model, and pixel-based feature engineering.
+
+### 🧠 Word Embedding Explainability Research
+
+- Collaborated with the Bank’s Chief Model Risk Officer on a research project focused on the explainability of word embeddings using clustering techniques such as Spectral Clustering, HDBSCAN, and analysis of ReLU neural network activation patterns.
+- Utilized Sentence Transformer embeddings (SBERT) and applied dimensionality reduction methods including PCA, UMAP, and t-SNE for cluster interpretation and visualization.
+- Extended the research by developing a Mixture of Experts model leveraging XGBoost.
+
+---
+
+## Education
+
+**Indian Institute of Science (IISc), Bangalore**
+📅 2020 – 2022
+🎓 Master of Technology (M.Tech.), Computational and Data Sciences
+📍 Bengaluru, Karnataka
+**CGPA:** 9.1 / 10.0
+
+**Government College of Engineering, Amravati (GCoEA)**
+📅 2015 – 2019
+🎓 Bachelor of Technology (B.Tech.), Mechanical Engineering
+📍 Amravati, Maharashtra
+**CGPA:** 8.29 / 10.0
+
+---
+
+## Certifications
+
+- Advanced Data Science with IBM (Coursera)
+- HYPERMESH (SHELL MESH AND SOLID MESH)
+- Introduction to Big Data (Coursera)
+- MASTERCAM (Design, Turning and Milling)
+- CREO PARAMETRIC
+
+---
+
+## Research Publication
+
+**Subspace Recursive Fermi-Operator Expansion Strategies for Large-Scale DFT Eigenvalue Problems on HPC Architectures**
+📝 Sameer Khadatkar, Phani Motamarri (MATRIX Lab)
+📅 July 20, 2023
+📚 *Journal of Chemical Physics, 159, 031102 (2023)*
+🔗 [Publication Link](https://pubs.aip.org/aip/jcp/article/159/3/031102/2903241/Subspace-recursive-Fermi-operator-expansion)
+
+- Implemented recursive Fermi-operator expansion methods on multi-node CPU (PARAM Pravega) and GPU (ORNL Summit) systems for large-scale DFT problems.
+- Applied mixed-precision strategies achieving 2× to 4× speedup over diagonalization.
+- Benchmarked using MPI and SLATE for distributed dense linear algebra.
+
+---
+
+## Academic, Independent and Other Projects
+
+- **LLM-Powered Multimodal Airline Chatbot**: Built a chatbot with GPT-4o-mini, supporting both text and voice, generating pop-art city images. Stack: Python, Gradio, custom tools.
+- **Future Stock Price Prediction for MAANG**: Used yfinance, Stateful LSTM vs XGBoost. LSTM outperformed with ~0.02 MAE.
+- **Duplicate Question Detection**: LSTM Siamese Network with Word2Vec and GloVe. GloVe performed better.
+- **Music Genre Classification**: Used MFCCs and spectral features. Best result: 76% ± 3% accuracy with SVM.
+- **Algorithm Implementation from Scratch**: PCA, LDA, GMM, TF-IDF, and backpropagation for DNNs.
+
+---
+
+## Skills
+
+**Knowledge Areas:**
+Model Risk Management, Machine Learning, Deep Learning, High-Performance Computing
+
+**Programming Languages:**
+Python, C, C++ (OpenMP, MPI, CUDA), SQL
+
+**Python Libraries & Tools:**
+Numpy, Pandas, Scikit-Learn, PyTorch, TensorFlow (Keras), PySpark, Matplotlib
+
+---
+
+## Relevant Courses
+
+- Machine Learning for Signal Processing (IISc)
+- Advanced Data Science with IBM (Coursera)
+- Deep Learning (NPTEL)
+- Pattern Recognition and Neural Networks (NPTEL)
+- Numerical Linear Algebra (IISc)
+- Data Analysis and Visualization (IISc)
+- Numerical Solution of Differential Equations (IISc)
+- Parallel Programming (IISc)
+- Introduction to Big Data (Coursera)
+- LLM Engineering: Master AI, Large Language Models & Agents (Udemy)
+
+---
+
+## Extracurricular Activities
+
+- **Project Associate** at MATRIX Lab, CDS Department, IISc.
+- **Teaching Assistant** for “DS284: Numerical Linear Algebra” at IISc.
+- Led suspension operations for SAE BAJA Team at GCoE Amravati.
+- Organized Annual Social Gathering as Joint Secretary at GCoE Amravati.
+
+---
+
+## Top Skills
+
+- Data Reporting
+- SQL
+- Microsoft Excel