Files
LLM_Engineering_OLD/week5/community-contributions/Personal Knowledge Worker/Project_PHI.ipynb
2025-06-09 13:36:47 +05:30

928 lines
27 KiB
Plaintext
Raw Blame History

This file contains invisible Unicode characters
This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "fOxyiqtzKqLg",
"outputId": "714d12c5-775e-42c8-b51c-979a9112b808"
},
"outputs": [],
"source": [
"%pip install -q datasets requests torch peft bitsandbytes transformers trl accelerate sentencepiece tiktoken matplotlib gradio modal ollama langchain langchain-core langchain-text-splitters langchain-openai langchain-chroma langchain-community faiss-cpu feedparser"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "zyxwwUw6LWXK"
},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import glob\n",
"from dotenv import load_dotenv\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Zzqc9nk1L_5w",
"outputId": "0af5e1bb-2ccb-4838-b7a5-76c19285d094"
},
"outputs": [],
"source": [
"from langchain_community.document_loaders import DirectoryLoader, TextLoader, UnstructuredPDFLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.schema import Document\n",
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
"from langchain_chroma import Chroma\n",
"import matplotlib.pyplot as plt\n",
"from sklearn.manifold import TSNE\n",
"import numpy as np\n",
"import plotly.graph_objects as go\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_community.embeddings import HuggingFaceEmbeddings\n",
"from huggingface_hub import login\n",
"import torch\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments, set_seed\n",
"from google.colab import userdata\n",
"from google.colab import drive\n",
"drive.mount('/content/drive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "u_vbe1itNZ2n"
},
"outputs": [],
"source": [
"base_path = \"/content/drive/MyDrive/sameer-db\"\n",
"folders = glob.glob(os.path.join(base_path, \"*\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "f0lJBMjhMrLO",
"outputId": "5cdc6327-3a3a-4d5b-ca05-4c1383c020e2"
},
"outputs": [],
"source": [
"def add_metadata(doc, doc_type):\n",
" doc.metadata[\"doc_type\"] = doc_type\n",
" return doc\n",
"\n",
"# With thanks to CG and Jon R, students on the course, for this fix needed for some users\n",
"text_loader_kwargs = {'encoding': 'utf-8'}\n",
"# If that doesn't work, some Windows users might need to uncomment the next line instead\n",
"# text_loader_kwargs={'autodetect_encoding': True}\n",
"\n",
"documents = []\n",
"for folder in folders:\n",
" doc_type = os.path.basename(folder)\n",
" loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
" folder_docs = loader.load()\n",
" documents.extend([add_metadata(doc, doc_type) for doc in folder_docs])\n",
"\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
"chunks = text_splitter.split_documents(documents)\n",
"\n",
"print(f\"Total number of chunks: {len(chunks)}\")\n",
"print(f\"Document types found: {set(doc.metadata['doc_type'] for doc in documents)}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "zSjwqZ3YNBLp"
},
"outputs": [],
"source": [
"hf_token = userdata.get('HF_TOKEN')\n",
"login(hf_token, add_to_git_credential=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "t7rraUyHNkdP"
},
"outputs": [],
"source": [
"Phi_4 = \"microsoft/Phi-4-mini-instruct\"\n",
"db_name = \"/content/drive/MyDrive/phi_vector_db\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "pDjj2S5ZPzF1"
},
"outputs": [],
"source": [
"quant_config = BitsAndBytesConfig(\n",
" load_in_4bit=True,\n",
" bnb_4bit_use_double_quant=True,\n",
" bnb_4bit_compute_dtype=torch.bfloat16,\n",
" bnb_4bit_quant_type=\"nf4\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 66,
"referenced_widgets": [
"2a0377fc1e0c4c08944be1857c4e2409",
"7c8335e0c3f8459d89f3b9815a896e39",
"0fcb91f0551a4871b747f82e5fa6ff38",
"fa5c6cf8395840e08e2743d6e88190be",
"8613224ada934e7ba57fd5184ea61044",
"1180c8fe49e94873a024d38d33649852",
"4395c417cc854fc48da18d0ddd62671e",
"d678106a6601478cb5712991604788f0",
"5c4a8d25dbc942d5a596c8fa8580a785",
"c1b076c063e04536831d68e5e48f1692",
"9bcee7f185434cd0b1a998448236548c"
]
},
"id": "qzQzgir5VUBF",
"outputId": "1e7198a3-4857-49ab-f368-d430beddbf42"
},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(Phi_4, trust_remote_code=True)\n",
"tokenizer.pad_token = tokenizer.eos_token\n",
"tokenizer.padding_side = \"right\"\n",
"\n",
"base_model = AutoModelForCausalLM.from_pretrained(\n",
" Phi_4,\n",
" quantization_config=quant_config,\n",
" device_map=\"auto\",\n",
")\n",
"base_model.generation_config.pad_token_id = tokenizer.pad_token_id\n",
"\n",
"print(f\"Memory footprint: {base_model.get_memory_footprint() / 1e9:.1f} GB\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MjK3mBKHQBra"
},
"outputs": [],
"source": [
"from langchain_core.embeddings import Embeddings\n",
"from typing import List\n",
"import torch.nn.functional as F"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Q1BIMVW4Pf0A"
},
"outputs": [],
"source": [
"class PHI4Embeddings(Embeddings):\n",
" def __init__(self, tokenizer, model):\n",
" self.tokenizer = tokenizer\n",
" self.model = model\n",
" self.model.eval()\n",
"\n",
" def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
" embeddings = []\n",
" for text in texts:\n",
" with torch.no_grad():\n",
" inputs = self.tokenizer(text, return_tensors=\"pt\", truncation=True, max_length=512).to(self.model.device)\n",
" outputs = self.model(**inputs, output_hidden_states=True)\n",
" hidden_states = outputs.hidden_states[-1] # Last layer\n",
" attention_mask = inputs[\"attention_mask\"].unsqueeze(-1)\n",
" pooled = (hidden_states * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)\n",
" normalized = F.normalize(pooled, p=2, dim=1)\n",
" embeddings.append(normalized[0].cpu().tolist())\n",
" return embeddings\n",
"\n",
" def embed_query(self, text: str) -> List[float]:\n",
" return self.embed_documents([text])[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "7aUTue_mMxof"
},
"outputs": [],
"source": [
"# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n",
"\n",
"embeddings = PHI4Embeddings(tokenizer, base_model)\n",
"\n",
"# Delete if already exists\n",
"\n",
"if os.path.exists(db_name):\n",
" Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "uWSe-8mATUag",
"outputId": "296804af-2283-435a-908c-48adaa6b4fd9"
},
"outputs": [],
"source": [
"# Create vectorstore\n",
"vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
"print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "1ZQ6agxtSLp5",
"outputId": "8e5bf8a7-fbaf-427b-9a67-369945aba80e"
},
"outputs": [],
"source": [
"# Let's investigate the vectors\n",
"\n",
"collection = vectorstore._collection\n",
"count = collection.count()\n",
"\n",
"sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n",
"dimensions = len(sample_embedding)\n",
"print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qBIOPr2YT5FM"
},
"outputs": [],
"source": [
"# Prework\n",
"result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
"vectors = np.array(result['embeddings'])\n",
"documents = result['documents']\n",
"metadatas = result['metadatas']\n",
"doc_types = [metadata['doc_type'] for metadata in metadatas]\n",
"colors = [['blue', 'red'][['personal', 'profile'].index(t)] for t in doc_types]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 617
},
"id": "fnuul36bUB3h",
"outputId": "f6cf1650-910a-4a03-f92d-9c200fb37de7"
},
"outputs": [],
"source": [
"# We humans find it easier to visualize things in 2D!\n",
"# Reduce the dimensionality of the vectors to 2D using t-SNE\n",
"# (t-distributed stochastic neighbor embedding)\n",
"\n",
"tsne = TSNE(n_components=2, random_state=42, perplexity=4)\n",
"reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"# Create the 2D scatter plot\n",
"fig = go.Figure(data=[go.Scatter(\n",
" x=reduced_vectors[:, 0],\n",
" y=reduced_vectors[:, 1],\n",
" mode='markers',\n",
" marker=dict(size=5, color=colors, opacity=0.8),\n",
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
" hoverinfo='text'\n",
")])\n",
"\n",
"fig.update_layout(\n",
" title='2D Chroma Vector Store Visualization',\n",
" scene=dict(xaxis_title='x',yaxis_title='y'),\n",
" width=800,\n",
" height=600,\n",
" margin=dict(r=20, b=10, l=10, t=40)\n",
")\n",
"\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 717
},
"id": "Dgaeb7aRUF5d",
"outputId": "47546459-e169-4d2b-d0d7-4ebd135556e0"
},
"outputs": [],
"source": [
"# Let's try 3D!\n",
"\n",
"tsne = TSNE(n_components=3, random_state=42, perplexity=4)\n",
"reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"# Create the 3D scatter plot\n",
"fig = go.Figure(data=[go.Scatter3d(\n",
" x=reduced_vectors[:, 0],\n",
" y=reduced_vectors[:, 1],\n",
" z=reduced_vectors[:, 2],\n",
" mode='markers',\n",
" marker=dict(size=5, color=colors, opacity=0.8),\n",
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
" hoverinfo='text'\n",
")])\n",
"\n",
"fig.update_layout(\n",
" title='3D Chroma Vector Store Visualization',\n",
" scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
" width=900,\n",
" height=700,\n",
" margin=dict(r=20, b=10, l=10, t=40)\n",
")\n",
"\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "BZcCyGI3YEwJ",
"outputId": "fd03e6ee-2ec1-4c6b-c14b-986255ca070c"
},
"outputs": [],
"source": [
"from langchain_community.llms import HuggingFacePipeline\n",
"from transformers import pipeline\n",
"\n",
"pipe = pipeline(\n",
" \"text-generation\",\n",
" model=base_model,\n",
" tokenizer=tokenizer,\n",
" max_new_tokens=4096,\n",
" return_full_text=False,\n",
" temperature=0.7\n",
")\n",
"\n",
"llm = HuggingFacePipeline(pipeline=pipe)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "WDY8-1gJUM1v"
},
"outputs": [],
"source": [
"# set up the conversation memory for the chat\n",
"from langchain.schema import SystemMessage\n",
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
"# memory.chat_memory.add_message(SystemMessage(content='''You are a helpful assistant that answers questions about Sameer Khadatkar **in English only**, based only on the retrieved documents.\n",
"# Do not respond in any other language.'''))\n",
"\n",
"# the retriever is an abstraction over the VectorStore that will be used during RAG\n",
"retriever = vectorstore.as_retriever(search_kwargs={\"k\": 2})\n",
"\n",
"# putting it together: set up the conversation chain with the local Phi-4 LLM, the vector store and memory\n",
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "dkuv5wD6jCrX"
},
"outputs": [],
"source": [
"def extract_first_helpful_answer(output: str) -> str:\n",
" if \"Helpful Answer:\" in output:\n",
" parts = output.split(\"Helpful Answer:\")\n",
" return parts[1].strip().split(\"\\n\")[0].strip() # Take only the first line after it\n",
" return output.strip()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZY5BH4C3UY1E"
},
"outputs": [],
"source": [
"query = \"Who is Sameer\"\n",
"result = conversation_chain.invoke({\"question\": query})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "7n5PcQw0iRjO",
"outputId": "794c4dad-efde-4220-a9bd-50a1ae156229"
},
"outputs": [],
"source": [
"print(extract_first_helpful_answer(result[\"answer\"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "vW025q5Tkwc3",
"outputId": "e57d34e5-a64c-4e0b-e29b-d887214331c4"
},
"outputs": [],
"source": [
"result"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "JIev764VkCht"
},
"outputs": [],
"source": [
"# set up a new conversation memory for the chat\n",
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
"\n",
"# putting it together: set up the conversation chain with the local Phi-4 LLM, the vector store and memory\n",
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OO9o_VBholCx"
},
"outputs": [],
"source": [
"# Wrapping that in a function\n",
"\n",
"def chat(question, history):\n",
" result = conversation_chain.invoke({\"question\": question})\n",
" return extract_first_helpful_answer(result[\"answer\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 646
},
"id": "zOqiuWqCo04a",
"outputId": "fcb89961-1687-4d54-fcdd-ca5c590d69de"
},
"outputs": [],
"source": [
"# And in Gradio:\n",
"\n",
"view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qIYSDiQUo5WX"
},
"outputs": [],
"source": []
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "T4",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"0fcb91f0551a4871b747f82e5fa6ff38": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_d678106a6601478cb5712991604788f0",
"max": 2,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5c4a8d25dbc942d5a596c8fa8580a785",
"value": 2
}
},
"1180c8fe49e94873a024d38d33649852": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2a0377fc1e0c4c08944be1857c4e2409": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_7c8335e0c3f8459d89f3b9815a896e39",
"IPY_MODEL_0fcb91f0551a4871b747f82e5fa6ff38",
"IPY_MODEL_fa5c6cf8395840e08e2743d6e88190be"
],
"layout": "IPY_MODEL_8613224ada934e7ba57fd5184ea61044"
}
},
"4395c417cc854fc48da18d0ddd62671e": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"5c4a8d25dbc942d5a596c8fa8580a785": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"7c8335e0c3f8459d89f3b9815a896e39": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_1180c8fe49e94873a024d38d33649852",
"placeholder": "",
"style": "IPY_MODEL_4395c417cc854fc48da18d0ddd62671e",
"value": "Loadingcheckpointshards:100%"
}
},
"8613224ada934e7ba57fd5184ea61044": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9bcee7f185434cd0b1a998448236548c": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"c1b076c063e04536831d68e5e48f1692": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d678106a6601478cb5712991604788f0": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"fa5c6cf8395840e08e2743d6e88190be": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_c1b076c063e04536831d68e5e48f1692",
"placeholder": "",
"style": "IPY_MODEL_9bcee7f185434cd0b1a998448236548c",
"value": "2/2[00:41&lt;00:00,19.69s/it]"
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 4
}