From a4db90be83b516b4fa5860eb5e1c05905e169086 Mon Sep 17 00:00:00 2001 From: SABEEH Shaikh Date: Tue, 20 May 2025 22:16:19 +0200 Subject: [PATCH 01/23] Added my dataset generator to contributions folder --- .../llm_dataset_generator.ipynb | 1801 +++++++++++++++++ 1 file changed, 1801 insertions(+) create mode 100644 week3/community-contributions/llm_dataset_generator.ipynb diff --git a/week3/community-contributions/llm_dataset_generator.ipynb b/week3/community-contributions/llm_dataset_generator.ipynb new file mode 100644 index 0000000..3de4ce1 --- /dev/null +++ b/week3/community-contributions/llm_dataset_generator.ipynb @@ -0,0 +1,1801 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Synthetic Data Generator Notebook\n", + "## About\n", + "This Colab notebook demonstrates the use of frontier and open-source LLMs for generating synthetic datasets for a business scenario provided by the user. From a UI implemented in Gradio, a user can describe their business scenario in detail, select the number of records needed along with its format, and adjust the maximum number of output tokens to be generated by the chosen LLM.\n", + "\n", + "It does not stop there. Once the records have been produced in the LLM output, they can be extracted and stored in a file in the same format the user selected earlier. The file is saved in the Colab runtime under the content directory. All of this extraction is done with the help of the 're' library. This was my first time using it and I thoroughly enjoyed learning it.\n", + "\n", + "## Outlook\n", + "Sometimes the response is cluttered with the user prompt and a lot of tags when using an open-source model, such as Mixtral from Mistral. This is because of the prompt format being used: the 'role': 'assistant' message format does not suit those models. This is an optimization worth looking into, and it can easily be done by using a custom prompt template for such models; these templates are hinted at on their Hugging Face repos.\n",
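+ "\n", + "To give an idea of that extraction step, its core is a small regular expression over the model's response. Below is a simplified sketch (the function name here is only illustrative; the actual `extract_dataset_string` helper further down in the notebook adds fallbacks for the different tag styles the models produce):\n", + "\n", + "```python\n", + "import re\n", + "\n", + "def extract_between_tags(response: str):\n", + "    # Return whatever the model wrapped between '<<<' and '>>>', if anything (illustrative sketch only)\n", + "    match = re.search(r'<<<\\s*(.*?)\\s*>>>', response, re.DOTALL)\n", + "    return match.group(1).strip() if match else None\n", + "```"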
+ ], + "metadata": { + "id": "SFA6R-4jL7SS" + } + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ip4I4Lff3B2M" + }, + "source": [ + "## Install & Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8zVlW-GMcBaU", + "outputId": "0c473564-fb93-41a9-c819-e6aa2382d75a" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.2/54.2 MB\u001b[0m \u001b[31m9.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m323.1/323.1 kB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.0/264.0 kB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m1.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m95.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m78.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m48.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m1.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m83.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.1/76.1 MB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m95.2/95.2 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.6/11.6 MB\u001b[0m \u001b[31m95.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m72.0/72.0 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.5/62.5 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h" + ] + } + ], + "source": [ + 
"!pip install -q gradio anthropic requests torch bitsandbytes transformers accelerate openai" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "YKVNzE5sFH2l" + }, + "outputs": [], + "source": [ + "# imports\n", + "import re\n", + "import os\n", + "import sys\n", + "import gc\n", + "import io\n", + "import json\n", + "import anthropic\n", + "import gradio as gr\n", + "import requests\n", + "import subprocess\n", + "import google.generativeai as ggai\n", + "import torch\n", + "import tempfile\n", + "import shutil\n", + "from io import StringIO\n", + "import pandas as pd\n", + "from google.colab import userdata\n", + "from huggingface_hub import login\n", + "from openai import OpenAI\n", + "from pathlib import Path\n", + "from datetime import datetime\n", + "from IPython.display import Markdown, display, update_display\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LWpD6bZv3mAR" + }, + "source": [ + "## HuggingFace Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "aeC2oWY2FTv7" + }, + "outputs": [], + "source": [ + "# Sign in to HuggingFace Hub\n", + "\n", + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8Au2UPVy3vn5" + }, + "source": [ + "## Frontier Models configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "geBBsd14X3UL" + }, + "outputs": [], + "source": [ + "openai_client = OpenAI(api_key=userdata.get('OPENAI_API_KEY'))\n", + "anthropic_client = anthropic.Anthropic(api_key=userdata.get('ANTHROPIC_API_KEY'))\n", + "ggai.configure(api_key=userdata.get('GOOGLE_API_KEY'))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tCnDIOlKgjbO" + }, + "source": [ + "## Defining Prompts" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "gkwXZsxofAU1" + }, + "outputs": [], + "source": [ + "system_prompt = \"\"\"\n", + "You are a synthetic dataset generator. Your role is to create synthetic dataset that infers structured data schemas from business scenarios given by the user.\n", + "\n", + "Your task is to:\n", + "1. Understand the user's business problem(s) or use case(s).\n", + "2. Identify the key fields needed to support that scenario.\n", + "3. Define appropriate field names, data types, and formats.\n", + "4. Generate synthetic records that match the inferred schema.\n", + "\n", + "Guidelines:\n", + "- Use realistic field names and values. Do not invent unrelated fields or values.\n", + "- Choose sensible data types: string, integer, float, date, boolean, enum, etc.\n", + "- Respect logical constraints (e.g., age range, date ranges, email formats).\n", + "- Output the dataset in the format the user requests (json, csv, txt, markdown table).\n", + "- If the scenario is vague or broad, make reasonable assumptions and explain them briefly before generating the dataset.\n", + "- Always generate a dataset that supports the business use case logically.\n", + "\n", + "Before generating the data, display the inferred schema in a readable format.\n", + "\"\"\"\n", + "\n", + "# trial_user_prompt = \"I’m building a churn prediction model for a telecom company. 
Can you generate a synthetic dataset with 100 rows?\"\n", + "def get_user_prompt(business_problem, no_of_samples, file_format):\n", + " return f\"\"\"\n", + " The business scenario for which I want you to generate a dataset is defined below:\n", + " {business_problem}\n", + "\n", + " Generate a synthetic dataset of {no_of_samples} records in {file_format} format.\n", + " When generating the dataset, wrap it between '<<<>>>' tags. Make sure the tags are present in the output.\n", + " Do not include any other special characters in between the tags, other than the ones required to produce the correct data format.\n", + " For example: when a 'csv' format is given, only the ',' character can be used in between the tags.\n", + " \"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yNpVf9-oQdoO" + }, + "source": [ + "### Quantization Config" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "3ErZ315MQdU3" + }, + "outputs": [], + "source": [ + "# 4-bit quantization config so the model can be loaded with a much smaller memory footprint\n", + "def get_quantization_config():\n", + " return BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "clGtRh0N4951" + }, + "source": [ + "## HF Model Inference" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "MAhyn1ehb3Dh" + }, + "outputs": [], + "source": [ + "# All-in-one HuggingFace model response function\n", + "def run_hfmodel_and_get_response(prompt, model_name, output_tokens):\n", + " tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + " inputs = tokenizer.apply_chat_template(prompt, return_tensors=\"pt\")\n", + " if torch.cuda.is_available():\n", + " inputs = inputs.to(\"cuda\")\n", + " streamer = TextStreamer(tokenizer)\n", + " if \"microsoft/bitnet-b1.58-2B-4T\" in model_name:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", trust_remote_code=True)\n", + " elif \"tiiuae/Falcon-E-3B-Instruct\" in model_name:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", torch_dtype=torch.float16)\n", + " else:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", quantization_config=get_quantization_config())\n", + " outputs = model.generate(inputs, max_new_tokens=output_tokens, streamer=streamer)\n", + " response = tokenizer.decode(outputs[0])\n", + " del model, inputs, tokenizer, outputs\n", + " gc.collect()\n", + " torch.cuda.empty_cache()\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Gh_Ny1aM-L8z" + }, + "source": [ + "## Frontier Models Inference" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "h11WlZNhfHCR" + }, + "outputs": [], + "source": [ + "# ChatGPT, Claude and Gemini response functions\n", + "def get_chatgpt_response(prompt, model_name, output_tokens):\n", + " response = openai_client.chat.completions.create(\n", + " model=model_name,\n", + " messages=prompt,\n", + " max_tokens=output_tokens,\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "def get_claude_response(prompt, model_name, output_tokens):\n", + " response = anthropic_client.messages.create(\n", + " model=model_name,\n", + " max_tokens=output_tokens,\n", + " 
system=system_prompt,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": prompt,\n", + " }\n", + " ],\n", + " )\n", + " return response.content[0].text\n", + "\n", + "def get_gemini_response(prompt, model_name, output_tokens):\n", + " model = ggai.GenerativeModel(\n", + " model_name=model_name,\n", + " system_instruction=system_prompt,\n", + " )\n", + "\n", + " response = model.generate_content(prompt, generation_config={\n", + " \"max_output_tokens\": output_tokens,\n", + " \"temperature\": 0.7,\n", + " })\n", + " return response.text" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nzHbM_WQvRgT" + }, + "source": [ + "## Gradio Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uFWZqw1R-al_" + }, + "source": [ + "### Dropdown Selection Lists" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "rOzEb0o--aD7" + }, + "outputs": [], + "source": [ + "# Dropdown List Values for the user\n", + "MODEL_TYPES=[\"GPT\", \"Claude\", \"Gemini\", \"HuggingFace\"]\n", + "OPENAI_MODEL_NAMES=[\"gpt-4o-mini\", \"gpt-4o\", \"gpt-3.5-turbo\"]\n", + "ANTHROPIC_MODELS=[\"claude-3-7-sonnet-latest\", \"claude-3-5-haiku-latest\", \"claude-3-opus-latest\"]\n", + "GOOGLE_MODELS=[\"gemini-2.0-flash\", \"gemini-1.5-pro\"]\n", + "HUGGINGFACE_MODELS=[\n", + " \"meta-llama/Llama-3.2-3B-Instruct\",\n", + " \"microsoft/bitnet-b1.58-2B-4T\",\n", + " \"ByteDance-Seed/Seed-Coder-8B-Instruct\",\n", + " \"tiiuae/Falcon-E-3B-Instruct\",\n", + " \"Qwen/Qwen2.5-7B-Instruct\"\n", + "]\n", + "MODEL_NAMES = {\n", + " \"GPT\": OPENAI_MODEL_NAMES,\n", + " \"Claude\": ANTHROPIC_MODELS,\n", + " \"Gemini\": GOOGLE_MODELS,\n", + " \"HuggingFace\": HUGGINGFACE_MODELS\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sbXGL8_4-oKc" + }, + "source": [ + "### UI" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "_0NCY7FgCVHj" + }, + "outputs": [], + "source": [ + "with gr.Blocks() as generator_ui:\n", + " gr.Markdown(\"# 🧠 Business Scenario → Synthetic Dataset Generator\")\n", + "\n", + " with gr.Row():\n", + " with gr.Column(scale=3):\n", + " with gr.Row():\n", + " dataset_size=gr.Number(value=10, label=\"Enter the number of data samples to generate.\", show_label=True)\n", + " format=gr.Dropdown([\"json\", \"csv\", \"txt\", \"markdown\"], label=\"Select the format for the dataset\", show_label=True)\n", + " with gr.Row():\n", + " scenario=gr.Textbox(label=\"Business Scenario\", lines=5, placeholder=\"Describe your business scenario here\")\n", + " with gr.Row():\n", + " error = gr.Markdown(visible=False)\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear Everything\")\n", + " submit = gr.Button(\"Generate Dataset\", variant=\"primary\")\n", + "\n", + " with gr.Column(scale=1):\n", + " model_type = gr.Dropdown(MODEL_TYPES, label=\"Model Type\", show_label=True, info=\"Select the model type you want to use\")\n", + " model_name = gr.Dropdown(MODEL_NAMES[model_type.value], label=\"Model Name\", show_label=True, allow_custom_value=True, info=\"Select the model name or enter one manually\")\n", + " output_tokens= gr.Number(value=1000, label=\"Enter the max number of output tokens to generate.\", show_label=True, info=\"This will impact the length of the response containing the dataset\")\n", + "\n", + " with gr.Row():\n", + " # Chatbot Interface\n", + " chatbot = gr.Chatbot(\n", + " type='messages',\n", + " label='Chatbot',\n", + " show_label=True,\n", + " height=300,\n", + " 
resizable=True,\n", + " elem_id=\"chatbot\",\n", + " avatar_images=(\"🧑\", \"🤖\",)\n", + " )\n", + " with gr.Row(variant=\"compact\"):\n", + " extract_btn = gr.Button(\"Extract and Save Dataset\", variant=\"huggingface\", visible=False)\n", + " file_name = gr.Textbox(label=\"Enter file name here (without file extension)\", placeholder=\"e.g. cancer_synthetic, warehouse_synthetic (no digits)\", visible=False)\n", + " with gr.Row():\n", + " markdown_preview = gr.Markdown(visible = False)\n", + " dataset_preview = gr.Textbox(label=\"Dataset Preview\",visible=False)\n", + " with gr.Row():\n", + " file_saved = gr.Textbox(visible=False)\n", + "\n", + " def run_inference(scenario, model_type, model_name, output_tokens, dataset_size, format):\n", + " \"\"\"Run the model and get the response\"\"\"\n", + " model_type=model_type.lower()\n", + " print(f\"scenario: {scenario}\")\n", + " print(f\"model_type: {model_type}\")\n", + " print(f\"model_name: {model_name}\")\n", + " if not scenario.strip():\n", + " return gr.update(value=\"❌ **Error:** Please define a scenario first!\",visible=True), []\n", + "\n", + " user_prompt = get_user_prompt(scenario, dataset_size, format)\n", + " prompt = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ]\n", + "\n", + " if model_type == \"gpt\":\n", + " response = get_chatgpt_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", + " elif model_type == \"claude\":\n", + " response = get_claude_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", + " elif model_type == \"gemini\":\n", + " response = get_gemini_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", + " else:\n", + " response = run_hfmodel_and_get_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", + " torch.cuda.empty_cache()\n", + " history = [\n", + " {\"role\": \"user\", \"content\": scenario},\n", + " {\"role\": \"assistant\", \"content\": response}\n", + " ]\n", + " return gr.update(visible=False), history\n", + "\n", + " def extract_dataset_string(response):\n", + " \"\"\"Extract dataset content between defined tags using regex.\"\"\"\n", + " # Remove known artificial tokens (common in HuggingFace or Claude)\n", + " response = re.sub(r\"<\\[.*?\\]>\", \"\", response)\n", + "\n", + " # Remove system or prompt echo if repeated before dataset\n", + " response = re.sub(r\"(?is)^.*?<<<\", \"<<<\", response.strip(), count=1)\n", + "\n", + " # 1. Match strict <<<>>>...<<<>>> tag blocks (use last match)\n", + " matches = re.findall(r\"<<<>>>[\\s\\r\\n]*(.*?)[\\s\\r\\n]*<<<>>>\", response, re.DOTALL)\n", + " if matches:\n", + " return matches[-1].strip()\n", + "\n", + " # 2. Match loose <<< ... >>> format\n", + " matches = re.findall(r\"<<<[\\s\\r\\n]*(.*?)[\\s\\r\\n]*>>>\", response, re.DOTALL)\n", + " if matches:\n", + " return matches[-1].strip()\n", + "\n", + " # 3. Match final fallback: take everything after last <<< as raw data\n", + " last_open = response.rfind(\"<<<\")\n", + " if last_open != -1:\n", + " raw = response[last_open + 3 :].strip()\n", + " # Optionally cut off noisy trailing notes, explanations, etc.\n", + " raw = re.split(r\"\\n\\s*\\n|Explanation:|Note:|---\", raw)[0]\n", + " return raw.strip()\n", + "\n", + " return \"Could not extract dataset! 
Try again with a different model.\"\n", + "\n", + " def extract_dataset_from_response(chatbot_history, file_name, file_type):\n", + " \"\"\"Extract dataset and update in gradio UI components\"\"\"\n", + " response = chatbot_history[-1][\"content\"]\n", + " if not response:\n", + " return gr.update(visible=True, value=\"Could not find LLM Response! Try again.\"), gr.update(visible=False)\n", + "\n", + " # match = re.search(r'<<<\\s*(.*?)\\s*>>>', response, re.DOTALL)\n", + " # print(match)\n", + " # if match and match.group(1).strip() == \"\":\n", + " # match = re.search(r'<<<>>>\\s*(.*?)\\s*<<<>>>', response, re.DOTALL)\n", + " # print(match)\n", + " # if match is None:\n", + " # return gr.update(visible=True, value=\"Could not extract dataset! Try again with a different model.\"), gr.update(visible=False)\n", + " # dataset = match.group(1).strip()\n", + " dataset = extract_dataset_string(response)\n", + " if dataset == \"Could not extract dataset! Try again with a different model.\":\n", + " return gr.update(visible=True, value=dataset), gr.update(visible=False)\n", + " text = save_dataset(dataset, file_type, file_name)\n", + " return gr.update(visible=True, value=text), gr.update(visible=True, value=dataset)\n", + "\n", + " def save_dataset(dataset, file_format, file_name):\n", + " \"\"\"Save dataset to a file based on the selected format.\"\"\"\n", + " file_name=file_name+\".\"+file_format\n", + " print(dataset)\n", + " print(file_name)\n", + " if file_format == \"json\":\n", + " try:\n", + " data = json.loads(dataset)\n", + " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", + " json.dump(data, f, indent=4)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! Try again in another format.\"\n", + " elif file_format == \"csv\":\n", + " try:\n", + " df = pd.read_csv(StringIO(dataset))\n", + " df.to_csv(file_name, index=False)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! Try again in another format.\"\n", + " elif file_format == \"txt\":\n", + " try:\n", + " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", + " f.write(dataset)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! 
Try again in another format.\"\n", + "\n", + " def clear_chat():\n", + " \"\"\"Clear the chat history.\"\"\"\n", + " return \"\", [], gr.update(visible=False), gr.update(visible=False)\n", + "\n", + " def show_extract_btn(chatbot_history, format):\n", + " \"\"\"Show the extract button if the response has been displayed in the chatbot and format is not set to markdown\"\"\"\n", + " if chatbot_history == []:\n", + " return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)\n", + " if format == \"markdown\":\n", + " return gr.update(visible=True, value=chatbot_history[1][\"content\"]), gr.update(visible=False), gr.update(visible=False)\n", + " return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)\n", + "\n", + " extract_btn.click(\n", + " fn=extract_dataset_from_response,\n", + " inputs=[chatbot, file_name, format],\n", + " outputs=[file_saved, dataset_preview]\n", + " )\n", + "\n", + " chatbot.change(\n", + " fn=show_extract_btn,\n", + " inputs=[chatbot, format],\n", + " outputs=[markdown_preview, extract_btn, file_name]\n", + " )\n", + "\n", + " model_type.change(\n", + " fn=lambda x: gr.update(choices=MODEL_NAMES[x], value=MODEL_NAMES[x][0]),\n", + " inputs=[model_type],\n", + " outputs=[model_name]\n", + " )\n", + "\n", + " submit.click(\n", + " fn=run_inference,\n", + " inputs=[scenario, model_type, model_name, output_tokens, dataset_size, format],\n", + " outputs=[error, chatbot],\n", + " show_progress=True\n", + " )\n", + "\n", + " clear.click(\n", + " clear_chat,\n", + " outputs=[scenario, chatbot, dataset_preview, file_saved]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "collapsed": true, + "id": "kzDUJahK8uRN", + "outputId": "c5674be2-b262-4439-ae91-4f3e1f49e041" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n", + "* Running on public URL: https://d076a9fef9034a4f24.gradio.live\n", + "\n", + "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "
" + ] + }, + "metadata": {} + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "scenario: Generate a dataset for training a model to approve/reject loan applications. Include features like loan amount, applicant income, co-applicant income, employment type, credit history (binary), loan term, number of dependents, education level, and loan approval status.\n", + "model_type: gpt\n", + "model_name: gpt-4o\n", + "Loan Amount,Applicant Income,Co-applicant Income,Employment Type,Credit History,Loan Term,Number of Dependents,Education Level,Loan Approval Status\n", + "250000,60000,15000,Salaried,1,240,1,Graduate,Approved\n", + "350000,80000,0,Salaried,1,360,2,Graduate,Approved\n", + "120000,30000,10000,Self-employed,0,180,1,Not Graduate,Rejected\n", + "500000,150000,50000,Self-employed,1,300,3,Graduate,Approved\n", + "75000,20000,0,Unemployed,0,120,0,Graduate,Rejected\n", + "275000,75000,25000,Salaried,0,240,2,Not Graduate,Rejected\n", + "100000,40000,20000,Salaried,1,60,0,Graduate,Approved\n", + "310000,95000,0,Self-employed,1,360,1,Graduate,Approved\n", + "450000,50000,0,Self-employed,0,180,4,Not Graduate,Rejected\n", + "200000,55000,20000,Salaried,1,120,3,Graduate,Approved\n", + "100000,35000,0,Unemployed,0,60,0,Not Graduate,Rejected\n", + "230000,68000,13000,Salaried,1,240,1,Graduate,Approved\n", + "330000,99000,40000,Self-employed,1,300,2,Graduate,Approved\n", + "150000,18000,7500,Unemployed,0,48,0,Not Graduate,Rejected\n", + "210000,64000,0,Salaried,0,120,1,Graduate,Rejected\n", + "310000,87000,30000,Self-employed,1,360,2,Graduate,Approved\n", + "50000,22000,7000,Unemployed,0,24,0,Not Graduate,Rejected\n", + "290000,92000,20000,Salaried,1,240,3,Graduate,Approved\n", + "110000,45000,0,Salaried,0,36,0,Graduate,Rejected\n", + "450000,76000,25000,Self-employed,1,360,2,Graduate,Approved\n", + "loan_approval_synthetic.txt\n", + "scenario: Generate a dataset for predicting medical appointment no-shows. Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", + "model_type: gpt\n", + "model_name: gpt-4o\n", + "scenario: Generate a dataset for predicting medical appointment no-shows. 
Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", + "model_type: gpt\n", + "model_name: gpt-4o\n", + "[\n", + " {\n", + " \"appointment_id\": \"AID001\",\n", + " \"scheduled_date\": \"2023-11-01\",\n", + " \"appointment_date\": \"2023-11-10\",\n", + " \"lead_time\": 9,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 45,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID002\",\n", + " \"scheduled_date\": \"2023-11-03\",\n", + " \"appointment_date\": \"2023-11-15\",\n", + " \"lead_time\": 12,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 34,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID003\",\n", + " \"scheduled_date\": \"2023-11-05\",\n", + " \"appointment_date\": \"2023-11-11\",\n", + " \"lead_time\": 6,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 29,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID004\",\n", + " \"scheduled_date\": \"2023-11-02\",\n", + " \"appointment_date\": \"2023-11-14\",\n", + " \"lead_time\": 12,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 62,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID005\",\n", + " \"scheduled_date\": \"2023-11-06\",\n", + " \"appointment_date\": \"2023-11-13\",\n", + " \"lead_time\": 7,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 21,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID006\",\n", + " \"scheduled_date\": \"2023-11-08\",\n", + " \"appointment_date\": \"2023-11-17\",\n", + " \"lead_time\": 9,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 58,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID007\",\n", + " \"scheduled_date\": \"2023-11-10\",\n", + " \"appointment_date\": \"2023-11-18\",\n", + " \"lead_time\": 8,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 41,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID008\",\n", + " \"scheduled_date\": \"2023-11-07\",\n", + " \"appointment_date\": \"2023-11-12\",\n", + " \"lead_time\": 5,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 67,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID009\",\n", + " \"scheduled_date\": \"2023-11-12\",\n", + " \"appointment_date\": \"2023-11-20\",\n", + " \"lead_time\": 8,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 74,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID010\",\n", + " \"scheduled_date\": \"2023-11-09\",\n", + " \"appointment_date\": \"2023-11-16\",\n", + " \"lead_time\": 
7,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 25,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID011\",\n", + " \"scheduled_date\": \"2023-11-13\",\n", + " \"appointment_date\": \"2023-11-21\",\n", + " \"lead_time\": 8,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 32,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID012\",\n", + " \"scheduled_date\": \"2023-11-14\",\n", + " \"appointment_date\": \"2023-11-25\",\n", + " \"lead_time\": 11,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 48,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID013\",\n", + " \"scheduled_date\": \"2023-11-15\",\n", + " \"appointment_date\": \"2023-11-27\",\n", + " \"lead_time\": 12,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 36,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID014\",\n", + " \"scheduled_date\": \"2023-11-17\",\n", + " \"appointment_date\": \"2023-12-02\",\n", + " \"lead_time\": 15,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 28,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID015\",\n", + " \"scheduled_date\": \"2023-11-16\",\n", + " \"appointment_date\": \"2023-12-01\",\n", + " \"lead_time\": 15,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 60,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID016\",\n", + " \"scheduled_date\": \"2023-11-18\",\n", + " \"appointment_date\": \"2023-12-05\",\n", + " \"lead_time\": 17,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 40,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID017\",\n", + " \"scheduled_date\": \"2023-11-19\",\n", + " \"appointment_date\": \"2023-12-03\",\n", + " \"lead_time\": 14,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 19,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID018\",\n", + " \"scheduled_date\": \"2023-11-21\",\n", + " \"appointment_date\": \"2023-12-07\",\n", + " \"lead_time\": 16,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 51,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID019\",\n", + " \"scheduled_date\": \"2023-11-23\",\n", + " \"appointment_date\": \"2023-12-09\",\n", + " \"lead_time\": 16,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 55,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID020\",\n", + " \"scheduled_date\": \"2023-11-22\",\n", + " \"appointment_date\": \"2023-12-08\",\n", + " \"lead_time\": 16,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 23,\n", 
+ " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID021\",\n", + " \"scheduled_date\": \"2023-11-24\",\n", + " \"appointment_date\": \"2023-12-10\",\n", + " \"lead_time\": 16,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 47,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID022\",\n", + " \"scheduled_date\": \"2023-11-25\",\n", + " \"appointment_date\": \"2023-12-12\",\n", + " \"lead_time\": 17,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 33,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID023\",\n", + " \"scheduled_date\": \"2023-11-27\",\n", + " \"appointment_date\": \"2023-12-14\",\n", + " \"lead_time\": 17,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 42,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID024\",\n", + " \"scheduled_date\": \"2023-11-29\",\n", + " \"appointment_date\": \"2023-12-15\",\n", + " \"lead_time\": 16,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 64,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID025\",\n", + " \"scheduled_date\": \"2023-12-01\",\n", + " \"appointment_date\": \"2023-12-20\",\n", + " \"lead_time\": 19,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 26,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID026\",\n", + " \"scheduled_date\": \"2023-12-03\",\n", + " \"appointment_date\": \"2023-12-22\",\n", + " \"lead_time\": 19,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 31,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID027\",\n", + " \"scheduled_date\": \"2023-12-05\",\n", + " \"appointment_date\": \"2023-12-24\",\n", + " \"lead_time\": 19,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 50,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID028\",\n", + " \"scheduled_date\": \"2023-12-06\",\n", + " \"appointment_date\": \"2023-12-25\",\n", + " \"lead_time\": 19,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 39,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID029\",\n", + " \"scheduled_date\": \"2023-12-07\",\n", + " \"appointment_date\": \"2023-12-27\",\n", + " \"lead_time\": 20,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 71,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID030\",\n", + " \"scheduled_date\": \"2023-12-08\",\n", + " \"appointment_date\": \"2023-12-28\",\n", + " \"lead_time\": 20,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 44,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 
5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID031\",\n", + " \"scheduled_date\": \"2023-12-10\",\n", + " \"appointment_date\": \"2023-12-31\",\n", + " \"lead_time\": 21,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 38,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID032\",\n", + " \"scheduled_date\": \"2023-12-11\",\n", + " \"appointment_date\": \"2024-01-02\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 53,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID033\",\n", + " \"scheduled_date\": \"2023-12-13\",\n", + " \"appointment_date\": \"2024-01-04\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 27,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID034\",\n", + " \"scheduled_date\": \"2023-12-15\",\n", + " \"appointment_date\": \"2024-01-06\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 46,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID035\",\n", + " \"scheduled_date\": \"2023-12-17\",\n", + " \"appointment_date\": \"2024-01-09\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 68,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID036\",\n", + " \"scheduled_date\": \"2023-12-19\",\n", + " \"appointment_date\": \"2024-01-10\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 37,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID037\",\n", + " \"scheduled_date\": \"2023-12-20\",\n", + " \"appointment_date\": \"2024-01-12\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 57,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID038\",\n", + " \"scheduled_date\": \"2023-12-22\",\n", + " \"appointment_date\": \"2024-01-14\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 43,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID039\",\n", + " \"scheduled_date\": \"2023-12-23\",\n", + " \"appointment_date\": \"2024-01-16\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 65,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID040\",\n", + " \"scheduled_date\": \"2023-12-25\",\n", + " \"appointment_date\": \"2024-01-17\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 49,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " 
\"appointment_id\": \"AID041\",\n", + " \"scheduled_date\": \"2023-12-27\",\n", + " \"appointment_date\": \"2024-01-20\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 30,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID042\",\n", + " \"scheduled_date\": \"2023-12-29\",\n", + " \"appointment_date\": \"2024-01-22\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 24,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID043\",\n", + " \"scheduled_date\": \"2024-01-01\",\n", + " \"appointment_date\": \"2024-01-25\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 72,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID044\",\n", + " \"scheduled_date\": \"2024-01-03\",\n", + " \"appointment_date\": \"2024-01-27\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 35,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID045\",\n", + " \"scheduled_date\": \"2024-01-04\",\n", + " \"appointment_date\": \"2024-01-28\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 61,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID046\",\n", + " \"scheduled_date\": \"2024-01-05\",\n", + " \"appointment_date\": \"2024-01-30\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 68,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID047\",\n", + " \"scheduled_date\": \"2024-01-07\",\n", + " \"appointment_date\": \"2024-02-01\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 22,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID048\",\n", + " \"scheduled_date\": \"2024-01-08\",\n", + " \"appointment_date\": \"2024-02-03\",\n", + " \"lead_time\": 26,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 52,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID049\",\n", + " \"scheduled_date\": \"2024-01-10\",\n", + " \"appointment_date\": \"2024-02-04\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 73,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID050\",\n", + " \"scheduled_date\": \"2024-01-12\",\n", + " \"appointment_date\": \"2024-02-06\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 56,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID051\",\n", + " \"scheduled_date\": 
\"2024-01-15\",\n", + " \"appointment_date\": \"2024-02-07\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 62,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID052\",\n", + " \"scheduled_date\": \"2024-01-17\",\n", + " \"appointment_date\": \"2024-02-10\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 80,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID053\",\n", + " \"scheduled_date\": \"2024-01-19\",\n", + " \"appointment_date\": \"2024-02-12\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 29,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID054\",\n", + " \"scheduled_date\": \"2024-01-21\",\n", + " \"appointment_date\": \"2024-02-13\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 66,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID055\",\n", + " \"scheduled_date\": \"2024-01-23\",\n", + " \"appointment_date\": \"2024-02-15\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 77,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID056\",\n", + " \"scheduled_date\": \"2024-01-25\",\n", + " \"appointment_date\": \"2024-02-17\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 54,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID057\",\n", + " \"scheduled_date\": \"2024-01-28\",\n", + " \"appointment_date\": \"2024-02-19\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 28,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID058\",\n", + " \"scheduled_date\": \"2024-01-30\",\n", + " \"appointment_date\": \"2024-02-22\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 45,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID059\",\n", + " \"scheduled_date\": \"2024-02-01\",\n", + " \"appointment_date\": \"2024-02-24\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 69,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID060\",\n", + " \"scheduled_date\": \"2024-02-02\",\n", + " \"appointment_date\": \"2024-02-26\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 51,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID061\",\n", + " \"scheduled_date\": \"2024-02-04\",\n", + " \"appointment_date\": \"2024-02-27\",\n", + " 
\"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 33,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID062\",\n", + " \"scheduled_date\": \"2024-02-06\",\n", + " \"appointment_date\": \"2024-03-01\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 84,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID063\",\n", + " \"scheduled_date\": \"2024-02-09\",\n", + " \"appointment_date\": \"2024-03-04\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 47,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID064\",\n", + " \"scheduled_date\": \"2024-02-10\",\n", + " \"appointment_date\": \"2024-03-06\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 59,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID065\",\n", + " \"scheduled_date\": \"2024-02-12\",\n", + " \"appointment_date\": \"2024-03-08\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 20,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID066\",\n", + " \"scheduled_date\": \"2024-02-14\",\n", + " \"appointment_date\": \"2024-03-10\",\n", + " \"lead_time\": 25,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 48,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID067\",\n", + " \"scheduled_date\": \"2024-02-17\",\n", + " \"appointment_date\": \"2024-03-12\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 38,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID068\",\n", + " \"scheduled_date\": \"2024-02-19\",\n", + " \"appointment_date\": \"2024-03-14\",\n", + " \"lead_time\": 24,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 76,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID069\",\n", + " \"scheduled_date\": \"2024-02-21\",\n", + " \"appointment_date\": \"2024-03-15\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 34,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID070\",\n", + " \"scheduled_date\": \"2024-02-23\",\n", + " \"appointment_date\": \"2024-03-17\",\n", + " \"lead_time\": 23,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 26,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID071\",\n", + " \"scheduled_date\": \"2024-02-25\",\n", + " \"appointment_date\": \"2024-03-19\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 2,\n", + " 
\"patient_age\": 22,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 2,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID072\",\n", + " \"scheduled_date\": \"2024-02-27\",\n", + " \"appointment_date\": \"2024-03-20\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 0,\n", + " \"patient_age\": 58,\n", + " \"gender\": \"Other\",\n", + " \"health_condition_severity\": 1,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID073\",\n", + " \"scheduled_date\": \"2024-02-29\",\n", + " \"appointment_date\": \"2024-03-22\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 3,\n", + " \"patient_age\": 67,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 3,\n", + " \"no_show_status\": false\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID074\",\n", + " \"scheduled_date\": \"2024-03-02\",\n", + " \"appointment_date\": \"2024-03-24\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 2,\n", + " \"patient_age\": 32,\n", + " \"gender\": \"Female\",\n", + " \"health_condition_severity\": 4,\n", + " \"no_show_status\": true\n", + " },\n", + " {\n", + " \"appointment_id\": \"AID075\",\n", + " \"scheduled_date\": \"2024-03-04\",\n", + " \"appointment_date\": \"2024-03-26\",\n", + " \"lead_time\": 22,\n", + " \"sms_reminders_sent\": 1,\n", + " \"patient_age\": 46,\n", + " \"gender\": \"Male\",\n", + " \"health_condition_severity\": 5,\n", + " \"no_show_status\": false\n", + " }\n", + "]\n", + "medical_appointment.json\n", + "scenario: Create a dataset of credit card transactions for detecting fraud. Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", + "model_type: claude\n", + "model_name: claude-3-7-sonnet-latest\n", + "scenario: Create a dataset of credit card transactions for detecting fraud. 
Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", + "model_type: claude\n", + "model_name: claude-3-7-sonnet-latest\n", + "transaction_id,amount,timestamp,merchant_category,customer_location,card_presence,device_type,fraud_label\n", + "TX123456789,45.99,2023-11-01 08:23:15,Retail,New York,Yes,POS Terminal,No\n", + "TX123456790,899.50,2023-11-01 09:45:22,Electronics,Chicago,Yes,POS Terminal,No\n", + "TX123456791,12.35,2023-11-01 10:12:45,Food & Beverage,Los Angeles,No,Mobile,No\n", + "TX123456792,5423.80,2023-11-01 11:30:18,Jewelry,Miami,No,Web Browser,Yes\n", + "TX123456793,76.24,2023-11-01 14:22:56,Groceries,Denver,Yes,POS Terminal,No\n", + "TX123456794,149.99,2023-11-02 07:15:33,Clothing,Seattle,No,Mobile,No\n", + "TX123456795,2500.00,2023-11-02 08:45:12,Electronics,Toronto,No,Web Browser,Yes\n", + "TX123456796,35.50,2023-11-02 12:33:47,Food & Beverage,Boston,Yes,POS Terminal,No\n", + "TX123456797,10.99,2023-11-02 15:20:09,Entertainment,Philadelphia,No,Mobile,No\n", + "TX123456798,750.25,2023-11-02 16:45:18,Travel,San Francisco,No,Web Browser,No\n", + "TX123456799,65.40,2023-11-02 19:22:31,Retail,Austin,Yes,POS Terminal,No\n", + "TX123456800,3299.99,2023-11-03 05:45:22,Electronics,London,No,Web Browser,Yes\n", + "TX123456801,22.50,2023-11-03 08:12:40,Food & Beverage,Atlanta,Yes,POS Terminal,No\n", + "TX123456802,129.95,2023-11-03 10:33:27,Clothing,Chicago,No,Mobile,No\n", + "TX123456803,50.00,2023-11-03 12:15:39,Gas Station,Dallas,Yes,POS Terminal,No\n", + "TX123456804,1999.00,2023-11-03 14:30:45,Electronics,Singapore,No,Web Browser,No\n", + "TX123456805,8.75,2023-11-03 18:22:14,Food & Beverage,Montreal,No,Mobile,No\n", + "TX123456806,459.99,2023-11-04 09:15:33,Home Goods,Houston,Yes,POS Terminal,No\n", + "TX123456807,2750.00,2023-11-04 10:45:28,Travel,Paris,No,Web Browser,Yes\n", + "TX123456808,85.00,2023-11-04 11:33:52,Healthcare,New York,Yes,POS Terminal,No\n", + "TX123456809,17.25,2023-11-04 13:10:44,Food & Beverage,Los Angeles,No,Mobile,No\n", + "TX123456810,150.49,2023-11-04 15:22:18,Entertainment,Miami,No,Mobile,No\n", + "TX123456811,4500.00,2023-11-04 19:45:02,Jewelry,Dubai,No,Web Browser,Yes\n", + "TX123456812,27.99,2023-11-05 08:33:27,Groceries,Seattle,Yes,POS Terminal,No\n", + "TX123456813,1250.00,2023-11-05 10:15:42,Electronics,Tokyo,No,Web Browser,No\n", + "TX123456814,56.75,2023-11-05 12:20:35,Clothing,San Diego,No,Mobile,No\n", + "TX123456815,18.50,2023-11-05 14:30:19,Food & Beverage,Denver,Yes,POS Terminal,No\n", + "TX123456816,3750.25,2023-11-05 16:45:08,Travel,Sydney,No,Web Browser,Yes\n", + "TX123456817,95.00,2023-11-05 18:22:56,Healthcare,Boston,No,Mobile,No\n", + "TX123456818,2345.67,2023-11-05 20:15:33,Electronics,Berlin,No,Web Browser,Yes\n", + "fraud_transactions.csv\n", + "scenario: Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", + "model_type: gemini\n", + "model_name: gemini-1.5-pro\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "WARNING:tornado.access:429 POST /v1beta/models/gemini-1.5-pro:generateContent?%24alt=json%3Benum-encoding%3Dint (127.0.0.1) 409.67ms\n", + "Traceback (most recent call last):\n", + " File \"/usr/local/lib/python3.11/dist-packages/gradio/queueing.py\", line 625, in process_events\n", + " response = await route_utils.call_process_api(\n", 
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/gradio/route_utils.py\", line 322, in call_process_api\n", + " output = await app.get_blocks().process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2181, in process_api\n", + " result = await self.call_function(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 1692, in call_function\n", + " prediction = await anyio.to_thread.run_sync( # type: ignore\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/anyio/to_thread.py\", line 56, in run_sync\n", + " return await get_async_backend().run_sync_in_worker_thread(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/anyio/_backends/_asyncio.py\", line 2470, in run_sync_in_worker_thread\n", + " return await future\n", + " ^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/anyio/_backends/_asyncio.py\", line 967, in run\n", + " result = context.run(func, *args)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/gradio/utils.py\", line 889, in wrapper\n", + " response = f(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^\n", + " File \"\", line 62, in run_inference\n", + " response = get_gemini_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"\", line 30, in get_gemini_response\n", + " response = model.generate_content(prompt, generation_config={\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n", + " response = self._client.generate_content(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/ai/generativelanguage_v1beta/services/generative_service/client.py\", line 835, in generate_content\n", + " response = rpc(\n", + " ^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/gapic_v1/method.py\", line 131, in __call__\n", + " return wrapped_func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 293, in retry_wrapped_func\n", + " return retry_target(\n", + " ^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 153, in retry_target\n", + " _retry_error_helper(\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_base.py\", line 212, in _retry_error_helper\n", + " raise final_exc from source_exc\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 144, in retry_target\n", + " result = target()\n", + " ^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/timeout.py\", line 130, in func_with_timeout\n", + " return func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/grpc_helpers.py\", line 76, in error_remapped_callable\n", + " return callable_(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File 
\"/usr/local/lib/python3.11/dist-packages/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py\", line 1161, in __call__\n", + " raise core_exceptions.from_http_response(response)\n", + "google.api_core.exceptions.TooManyRequests: 429 POST https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?%24alt=json%3Benum-encoding%3Dint: You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "scenario: Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", + "model_type: gemini\n", + "model_name: gemini-2.0-flash\n", + "CustomerID,PortfolioValue,Age,IncomeBracket,RiskAppetite,TransactionsPerMonth,PreferredInvestmentType,RiskScore\n", + "1,75000.00,32,Medium,High,8,\"Stocks, Options\",78\n", + "2,120000.50,45,High,Medium,3,\"Bonds, Mutual Funds\",55\n", + "3,30000.75,28,Low,Low,1,\"Bonds\",25\n", + "4,250000.00,58,High,High,12,\"Stocks, Real Estate\",85\n", + "5,80000.25,39,Medium,Medium,5,\"Mutual Funds\",60\n", + "6,150000.00,48,High,Low,2,\"Bonds, ETFs\",40\n", + "7,45000.50,25,Low,Medium,4,\"Stocks\",50\n", + "8,300000.75,62,High,High,15,\"Stocks, Options, Real Estate\",92\n", + "9,90000.00,35,Medium,Medium,6,\"ETFs, Mutual Funds\",65\n", + "10,180000.25,50,High,Low,1,\"Bonds\",35\n", + "11,60000.50,29,Low,Low,2,\"Bonds, ETFs\",30\n", + "12,400000.00,65,High,High,18,\"Stocks, Options, Cryptocurrency\",95\n", + "13,100000.75,42,Medium,Medium,7,\"Mutual Funds, Real Estate\",70\n", + "14,200000.00,55,High,Low,0,\"Bonds, Annuities\",20\n", + "15,70000.25,31,Low,Medium,3,\"Stocks, ETFs\",58\n", + "16,130000.50,47,High,Medium,4,\"Bonds, Mutual Funds\",52\n", + "17,35000.75,27,Low,Low,1,\"Bonds\",28\n", + "18,280000.00,60,High,High,14,\"Stocks, Real Estate\",88\n", + "19,85000.25,37,Medium,Medium,5,\"ETFs\",63\n", + "20,160000.00,52,High,Low,2,\"Bonds, CDs\",38\n", + "21,50000.50,26,Low,Low,1,\"Bonds, Government Securities\",22\n", + "22,450000.75,68,High,High,20,\"Stocks, Options, Venture Capital\",97\n", + "23,110000.00,44,Medium,Medium,8,\"Mutual Funds, ETFs\",73\n", + "24,220000.25,57,High,Low,0,\"Bonds, Treasury Bills\",18\n", + "25,72000.50,33,Low,Medium,4,\"Stocks\",56\n", + "26,140000.00,49,High,Medium,3,\"Bonds, Mutual Funds\",54\n", + "27,32000.75,29,Low,Low,1,\"Bonds\",26\n", + "28,260000.00,61,High,High,13,\"Stocks, Real Estate\",86\n", + "29,82000.25,38,Medium,Medium,6,\"ETFs, Index Funds\",61\n", + "30,170000.50,53,High,Low,2,\"Bonds\",36\n", + "31,55000.75,24,Low,Low,2,\"Bonds, Money Market Accounts\",24\n", + "32,350000.00,64,High,High,17,\"Stocks, Options, Commodities\",93\n", + "33,95000.25,41,Medium,Medium,7,\"Mutual Funds, REITs\",68\n", + "34,190000.50,56,High,Low,0,\"Bonds, Fixed Income\",19\n", + "35,65000.00,30,Low,Medium,3,\"Stocks, Small Cap Stocks\",59\n", + "36,125000.75,46,High,Medium,4,\"Bonds, Large Cap Funds\",51\n", + "37,33000.25,28,Low,Low,1,\"Bonds\",27\n", + "38,270000.50,59,High,High,14,\"Stocks, Emerging Markets\",87\n", + "39,88000.00,36,Medium,Medium,5,\"ETFs, Balanced Funds\",64\n", + "40,155000.75,51,High,Low,2,\"Bonds, Corporate Bonds\",37\n", + "41,48000.25,25,Low,Low,1,\"Bonds, Municipal Bonds\",21\n", + "42,420000.00,67,High,High,19,\"Stocks, 
Options, Derivatives\",96\n", + "43,105000.75,43,Medium,Medium,8,\"Mutual Funds, Sector Funds\",71\n", + "44,210000.00,54,High,Low,0,\"Bonds, Government Bonds\",17\n", + "45,71000.25,32,Low,Medium,4,\"Stocks\",57\n", + "46,135000.50,48,High,Medium,3,\"Bonds, Index Funds\",53\n", + "47,34000.75,27,Low,Low,1,\"Bonds\",29\n", + "48,290000.00,63,High,High,16,\"Stocks, Real Estate, Private Equity\",90\n", + "49,89000.25,40,Medium,Medium,6,\"ETFs\",62\n", + "50,175000.50,50,High,Low,2,\"Bonds, Preferred Stocks\",39\n", + "investment_customers.csv\n", + "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", + "model_type: gemini\n", + "model_name: gemini-2.0-flash\n", + "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", + "model_type: gemini\n", + "model_name: gemini-2.0-flash\n", + "\n", + "testinggemini.json\n", + "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", + "model_type: gemini\n", + "model_name: gemini-2.0-flash\n", + "CustomerID,MonthlyCharges,ContractType,Tenure,SupportCalls,InternetUsage,PaymentMethod,Churned\n", + "TEL2847592374,67.55,Month-to-Month,9,3,145.2,Electronic Check,Yes\n", + "TEL9283746510,92.30,One Year,48,1,87.9,Credit Card,No\n", + "TEL1837465921,25.00,Month-to-Month,2,0,25.6,Mailed Check,Yes\n", + "TEL7364582910,115.75,Two Year,65,2,203.4,Bank Transfer,No\n", + "TEL5928374615,48.20,Month-to-Month,15,4,98.7,Electronic Check,Yes\n", + "TEL3847592016,78.90,One Year,36,1,167.1,Credit Card,No\n", + "TEL8273645910,31.50,Month-to-Month,3,0,30.2,Mailed Check,Yes\n", + "TEL6354789210,102.40,Two Year,70,3,185.9,Bank Transfer,No\n", + "TEL4738291056,55.85,Month-to-Month,11,2,112.5,Electronic Check,Yes\n", + "TEL1928374650,85.60,One Year,42,1,76.3,Credit Card,No\n", + "TEL7463529108,28.75,Month-to-Month,5,0,28.9,Mailed Check,Yes\n", + "TEL5293847610,110.30,Two Year,68,2,192.7,Bank Transfer,No\n", + "TEL3647582910,62.10,Month-to-Month,13,3,134.8,Electronic Check,Yes\n", + "TEL9182736450,98.45,One Year,39,1,91.5,Credit Card,No\n", + "TEL2736458109,34.90,Month-to-Month,7,0,33.6,Mailed Check,Yes\n", + "TEL8547392016,107.60,Two Year,62,2,179.3,Bank Transfer,No\n", + "TEL6192837450,59.35,Month-to-Month,10,3,123.4,Electronic Check,Yes\n", + "TEL4928374651,82.90,One Year,45,1,82.1,Credit Card,No\n", + "TEL1635294810,22.50,Month-to-Month,4,0,22.3,Mailed Check,Yes\n", + "TEL7283746509,118.20,Two Year,71,2,210.5,Bank Transfer,No\n", + "TEL5829374610,69.70,Month-to-Month,12,3,156.9,Electronic Check,Yes\n", + "TEL3918273640,95.15,One Year,40,1,89.7,Credit Card,No\n", + "TEL9374628105,37.40,Month-to-Month,6,0,36.2,Mailed Check,Yes\n", + "TEL6458293710,104.90,Two Year,67,2,188.1,Bank Transfer,No\n", + "TEL4829374615,57.10,Month-to-Month,14,3,118.2,Electronic Check,Yes\n", + "TEL1536472910,80.55,One Year,43,1,78.9,Credit Card,No\n", + "TEL7192837465,25.30,Month-to-Month,2,0,25.9,Mailed Check,Yes\n", + 
"TEL5374829106,112.90,Two Year,69,2,195.3,Bank Transfer,No\n", + "TEL3746582910,64.85,Month-to-Month,8,3,140.6,Electronic Check,Yes\n", + "TEL9263548107,90.20,One Year,46,1,85.5,Credit Card,No\n", + "TEL2635478109,32.65,Month-to-Month,4,0,31.4,Mailed Check,Yes\n", + "TEL8473920165,109.70,Two Year,63,2,182.5,Bank Transfer,No\n", + "TEL6283749105,54.50,Month-to-Month,16,3,110.1,Electronic Check,Yes\n", + "TEL4192837460,77.30,One Year,41,1,75.2,Credit Card,No\n", + "TEL1746352910,29.90,Month-to-Month,5,0,29.6,Mailed Check,Yes\n", + "TEL7382910564,117.10,Two Year,72,2,207.9,Bank Transfer,No\n", + "TEL5928374610,72.00,Month-to-Month,13,3,159.7,Electronic Check,Yes\n", + "TEL3847592016,97.85,One Year,38,1,93.2,Credit Card,No\n", + "TEL9182736450,39.55,Month-to-Month,7,0,38.3,Mailed Check,Yes\n", + "TEL6354789210,106.30,Two Year,66,2,190.8,Bank Transfer,No\n", + "TEL4738291056,51.75,Month-to-Month,11,3,105.9,Electronic Check,Yes\n", + "TEL1928374650,74.60,One Year,44,1,73.1,Credit Card,No\n", + "TEL7463529108,27.10,Month-to-Month,3,0,26.7,Mailed Check,Yes\n", + "TEL5293847610,114.50,Two Year,70,2,198.6,Bank Transfer,No\n", + "TEL3647582910,66.45,Month-to-Month,12,3,138.5,Electronic Check,Yes\n", + "TEL9182736450,93.50,One Year,47,1,84.2,Credit Card,No\n", + "TEL2736458109,35.15,Month-to-Month,6,0,34.9,Mailed Check,Yes\n", + "TEL8547392016,103.80,Two Year,64,2,176.1,Bank Transfer,No\n", + "TEL6192837450,58.20,Month-to-Month,14,3,120.7,Electronic Check,Yes\n", + "TEL4928374651,81.65,One Year,41,1,80.5,Credit Card,No\n", + "TEL1635294810,23.70,Month-to-Month,5,0,23.4,Mailed Check,Yes\n", + "TEL7283746509,119.90,Two Year,68,2,213.2,Bank Transfer,No\n", + "TEL5829374610,70.85,Month-to-Month,9,3,153.7,Electronic Check,Yes\n", + "TEL3918273640,96.20,One Year,45,1,92.4,Credit Card,No\n", + "TEL9374628105,36.80,Month-to-Month,7,0,35.6,Mailed Check,Yes\n", + "TEL6458293710,105.50,Two Year,69,2,185.4,Bank Transfer,No\n", + "TEL4829374615,56.30,Month-to-Month,15,3,115.1,Electronic Check,Yes\n", + "TEL1536472910,79.40,One Year,42,1,77.8,Credit Card,No\n", + "TEL7192837465,24.50,Month-to-Month,4,0,24.2,Mailed Check,Yes\n", + "TEL5374829106,111.80,Two Year,67,2,193.9,Bank Transfer,No\n", + "TEL3746582910,63.70,Month-to-Month,10,3,137.4,Electronic Check,Yes\n", + "TEL9263548107,89.10,One Year,40,1,83.9,Credit Card,No\n", + "TEL2635478109,33.85,Month-to-Month,6,0,32.5,Mailed Check,Yes\n", + "TEL8473920165,108.60,Two Year,65,2,179.9,Bank Transfer,No\n", + "TEL6283749105,53.40,Month-to-Month,11,3,107.8,Electronic Check,Yes\n", + "TEL4192837460,76.20,One Year,43,1,74.1,Credit Card,No\n", + "TEL1746352910,30.50,Month-to-Month,5,0,30.2,Mailed Check,Yes\n", + "TEL7382910564,116.00,Two Year,71,2,205.3,Bank Transfer,No\n", + "TEL5928374610,71.15,Month-to-Month,16,3,157.6,Electronic Check,Yes\n", + "TEL3847592016,97.00,One Year,39,1,90.9,Credit Card,No\n", + "TEL9182736450,38.70,Month-to-Month,3,0,37.4,Mailed Check,Yes\n", + "TEL6354789210,105.20,Two Year,68,2,188.7,Bank Transfer,No\n", + "TEL4738291056,52.55,Month-to-Month,14,3,104.2,Electronic Check,Yes\n", + "TEL1928374650,75.40,One Year,46,1,72.4,Credit Card,No\n", + "TEL7463529108,26.30,Month-to-Month,2,0,26.0,Mailed Check,Yes\n", + "TEL5293847610,113.70,Two Year,66,2,196.8,Bank Transfer,No\n", + "TEL3647582910,65.60,Month-to-Month,15,3,139.1,Electronic Check,Yes\n", + "TEL9182736450,94.35,One Year,42,1,86.8,Credit Card,No\n", + "TEL2736458109,34.30,Month-to-Month,4,0,34.0,Mailed Check,Yes\n", + "TEL8547392016,102.70,Two Year,63,2,173.5,Bank Transfer,No\n", + 
"TEL6192837450,59.90,Month-to-Month,13,3,121.3,Electronic Check,Yes\n", + "TEL4928374651,82.20,One Year,47,1,79.2,Credit Card,No\n", + "TEL1635294810,23.10,Month-to-Month,6,0,22.8,Mailed Check,Yes\n", + "TEL7283746509,119.30,Two Year,69,2,211.6,Bank Transfer,No\n", + "TEL5829374610,71.40,Month-to-Month,10,3,154.3,Electronic Check,Yes\n", + "TEL3918273640,96.70,One Year,44,1,91.7,Credit Card,No\n", + "TEL9374628105,37.10,Month-to-Month,5,0,36.8,Mailed Check,Yes\n", + "TEL6458293710,106.00,Two Year,70,2,186.1,Bank Transfer,No\n", + "TEL4829374615,55.70,Month-to-Month,12,3,112.0,Electronic Check,Yes\n", + "TEL1536472910,78.80,One Year,41,1,76.5,Credit Card,No\n", + "TEL7192837465,25.00,Month-to-Month,7,0,24.7,Mailed Check,Yes\n", + "TEL5374829106,111.20,Two Year,64,2,191.3,Bank Transfer,No\n", + "TEL3746582910,64.20,Month-to-Month,14,3,136.1,Electronic Check,Yes\n", + "TEL9263548107,90.80,One Year,43,1,82.6,Credit Card,No\n", + "TEL2635478109,33.20,Month-to-Month,5,0,31.9,Mailed Check,Yes\n", + "TEL8473920165,109.10,Two Year,67,2,177.4,Bank Transfer,No\n", + "TEL6283749105,54.00,Month-to-Month,16,3,109.4,Electronic Check,Yes\n", + "TEL4192837460,75.60,One Year,40,1,73.4,Credit Card,No\n", + "TEL1746352910,31.10,Month-to-Month,3,0,30.8,Mailed Check,Yes\n", + "TEL7382910564,115.40,Two Year,65,2,202.7,Bank Transfer,No\n", + "testinggemini.txt\n", + "Keyboard interruption in main thread... closing server.\n", + "Killing tunnel 127.0.0.1:7860 <> https://d076a9fef9034a4f24.gradio.live\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [] + }, + "metadata": {}, + "execution_count": 11 + } + ], + "source": [ + "# Example Scenarios\n", + "\n", + "# Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", + "# Generate a dataset for training a model to approve/reject loan applications. Include features like loan amount, applicant income, co-applicant income, employment type, credit history (binary), loan term, number of dependents, education level, and loan approval status.\n", + "# Create a dataset of credit card transactions for detecting fraud. Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", + "# Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", + "# Create a dataset of hospitalized patients to predict readmission within 30 days. Include patient ID, age, gender, number of prior admissions, diagnosis codes, length of stay, discharge type, medications prescribed, and readmission label.\n", + "# Generate a dataset for predicting medical appointment no-shows. 
Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", + "\n", + "generator_ui.launch(share=True, debug=True, inbrowser=True)" + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "_9HIC_AzfZBZ" + }, + "execution_count": null, + "outputs": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 045d1e4b36fd6c2130d9745a917de7987fff62be Mon Sep 17 00:00:00 2001 From: SABEEH Shaikh Date: Sat, 24 May 2025 17:37:22 +0200 Subject: [PATCH 02/23] Cleared output of all cells as per feedback given --- .../llm_dataset_generator.ipynb | 2402 +++++------------ 1 file changed, 603 insertions(+), 1799 deletions(-) diff --git a/week3/community-contributions/llm_dataset_generator.ipynb b/week3/community-contributions/llm_dataset_generator.ipynb index 3de4ce1..c407ad4 100644 --- a/week3/community-contributions/llm_dataset_generator.ipynb +++ b/week3/community-contributions/llm_dataset_generator.ipynb @@ -1,1801 +1,605 @@ { - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Synthetic Data Generator Notebook\n", - "## About\n", - "This colab notebook demonstrates the use of Frontier and Open-source LLM models for generating synthetic dataset for a business scenario provided by the user. From a UI interface implemented in gradio, a user can define their business scenario in detail, select the number of records needed along with the its format and adjust the number of max output tokens to be generated by the chosen LLM.\n", - "\n", - "It does not stop here. Once the records have been produced in the LLM output, it can be extracted and stored in a file, format same as set by user before. The file is stored in colab notebook under the contents directory. All of this is extraction is done with the help of the 're' library. My first time using it and I totally enjoyed learning it.\n", - "\n", - "## Outlook\n", - "Sometimes the response is loaded with the user prompt and a lot of tags when using an open-source models, such as Mixtral from Mistral. This is because of the prompt format being used. The 'assistant' 'role' format does not suit them. This is an optimization to look for and can be easily done by using custom prompt template for such models and these templates are hinted on their huggingface repo." 
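
Side note on the Outlook point above about open-source models such as Mixtral echoing the prompt and extra tags: one hedged way to apply a custom prompt format is to try the model's own chat template first and fall back to folding the system prompt into the first user turn when the template rejects a separate "system" role. This is a minimal sketch, not part of the notebook; the model id and prompts are placeholders.

```python
# Minimal sketch (an addition, not in the notebook): build model inputs for open-source
# instruct models whose chat template may not accept a "system" role (e.g. Mixtral).
from transformers import AutoTokenizer

def build_inputs(model_name, system_prompt, user_prompt):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    try:
        # Works when the model's chat template supports a system role.
        return tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        )
    except Exception:
        # Fallback for templates that only allow alternating user/assistant turns:
        # merge the system instructions into the user message.
        merged = [{"role": "user", "content": f"{system_prompt}\n\n{user_prompt}"}]
        return tokenizer.apply_chat_template(
            merged, add_generation_prompt=True, return_tensors="pt"
        )
```

This mirrors the notebook's own `tokenizer.apply_chat_template(..., return_tensors="pt")` call, so it could slot into `run_hfmodel_and_get_response` with minimal changes if the echoing problem shows up again.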
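A second side note, prompted by the `google.api_core.exceptions.TooManyRequests` (429) traceback in the Gemini run logged earlier: a simple retry with exponential backoff around the existing `get_gemini_response` helper would let the UI recover from transient quota errors instead of surfacing a stack trace. A minimal sketch, assuming the notebook's `get_gemini_response` is in scope; the attempt count and delays are arbitrary example values.

```python
# Minimal retry-with-backoff sketch (an addition, not part of the notebook) for 429s
# raised by the Gemini API. Wraps the notebook's get_gemini_response helper.
import time
from google.api_core.exceptions import TooManyRequests

def get_gemini_response_with_retry(prompt, model_name, output_tokens, max_attempts=3):
    for attempt in range(1, max_attempts + 1):
        try:
            return get_gemini_response(
                prompt=prompt, model_name=model_name, output_tokens=output_tokens
            )
        except TooManyRequests:
            if attempt == max_attempts:
                raise
            # Exponential backoff: wait 2s, 4s, 8s, ... before retrying.
            time.sleep(2 ** attempt)
```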
- ], - "metadata": { - "id": "SFA6R-4jL7SS" - } - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ip4I4Lff3B2M" - }, - "source": [ - "## Install & Imports" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "8zVlW-GMcBaU", - "outputId": "0c473564-fb93-41a9-c819-e6aa2382d75a" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.2/54.2 MB\u001b[0m \u001b[31m9.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m323.1/323.1 kB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.0/264.0 kB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m1.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m95.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m78.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m48.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m1.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m83.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.1/76.1 MB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m95.2/95.2 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.6/11.6 MB\u001b[0m \u001b[31m95.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m72.0/72.0 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.5/62.5 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h" - ] - } - ], - "source": [ - 
"!pip install -q gradio anthropic requests torch bitsandbytes transformers accelerate openai" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "YKVNzE5sFH2l" - }, - "outputs": [], - "source": [ - "# imports\n", - "import re\n", - "import os\n", - "import sys\n", - "import gc\n", - "import io\n", - "import json\n", - "import anthropic\n", - "import gradio as gr\n", - "import requests\n", - "import subprocess\n", - "import google.generativeai as ggai\n", - "import torch\n", - "import tempfile\n", - "import shutil\n", - "from io import StringIO\n", - "import pandas as pd\n", - "from google.colab import userdata\n", - "from huggingface_hub import login\n", - "from openai import OpenAI\n", - "from pathlib import Path\n", - "from datetime import datetime\n", - "from IPython.display import Markdown, display, update_display\n", - "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "LWpD6bZv3mAR" - }, - "source": [ - "## HuggingFace Setup" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "id": "aeC2oWY2FTv7" - }, - "outputs": [], - "source": [ - "# Sign in to HuggingFace Hub\n", - "\n", - "hf_token = userdata.get('HF_TOKEN')\n", - "login(hf_token, add_to_git_credential=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8Au2UPVy3vn5" - }, - "source": [ - "## Frontier Models configuration" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "geBBsd14X3UL" - }, - "outputs": [], - "source": [ - "openai_client = OpenAI(api_key=userdata.get('OPENAI_API_KEY'))\n", - "anthropic_client = anthropic.Anthropic(api_key=userdata.get('ANTHROPIC_API_KEY'))\n", - "ggai.configure(api_key=userdata.get('GOOGLE_API_KEY'))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "tCnDIOlKgjbO" - }, - "source": [ - "## Defining Prompts" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "gkwXZsxofAU1" - }, - "outputs": [], - "source": [ - "system_prompt = \"\"\"\n", - "You are a synthetic dataset generator. Your role is to create synthetic dataset that infers structured data schemas from business scenarios given by the user.\n", - "\n", - "Your task is to:\n", - "1. Understand the user's business problem(s) or use case(s).\n", - "2. Identify the key fields needed to support that scenario.\n", - "3. Define appropriate field names, data types, and formats.\n", - "4. Generate synthetic records that match the inferred schema.\n", - "\n", - "Guidelines:\n", - "- Use realistic field names and values. Do not invent unrelated fields or values.\n", - "- Choose sensible data types: string, integer, float, date, boolean, enum, etc.\n", - "- Respect logical constraints (e.g., age range, date ranges, email formats).\n", - "- Output the dataset in the format the user requests (json, csv, txt, markdown table).\n", - "- If the scenario is vague or broad, make reasonable assumptions and explain them briefly before generating the dataset.\n", - "- Always generate a dataset that supports the business use case logically.\n", - "\n", - "Before generating the data, display the inferred schema in a readable format.\n", - "\"\"\"\n", - "\n", - "# trial_user_prompt = \"I’m building a churn prediction model for a telecom company. 
Can you generate a synthetic dataset with 100 rows?\"\n", - "def get_user_prompt(business_problem, no_of_samples, file_format):\n", - " return f\"\"\"\n", - " The business scenario for which I want you to generate a dataset is defined below:\n", - " {business_problem}\n", - "\n", - " Generate a synthetic dataset of {no_of_samples} records in {file_format} format.\n", - " When generating the dataset, wrap it between the '<<<>>>' tag. Make sure the tag is there in the output.\n", - " Do not include any other special characters in between the tags, other than the ones required in producing the correct format of data.\n", - " For examples: When a 'csv' format is given, only the ',' character can be used in between the tags.\n", - " \"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yNpVf9-oQdoO" - }, - "source": [ - "### Quanitzation Config" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "id": "3ErZ315MQdU3" - }, - "outputs": [], - "source": [ - "# This allows us to load the model into memory and use less memory\n", - "def get_quantization_config():\n", - " return BitsAndBytesConfig(\n", - " load_in_4bit=True,\n", - " bnb_4bit_use_double_quant=True,\n", - " bnb_4bit_compute_dtype=torch.bfloat16,\n", - " bnb_4bit_quant_type=\"nf4\"\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "clGtRh0N4951" - }, - "source": [ - "## HF Model inference" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "id": "MAhyn1ehb3Dh" - }, - "outputs": [], - "source": [ - "# All in one HuggingFace Model Response function\n", - "def run_hfmodel_and_get_response(prompt, model_name, output_tokens):\n", - " tokenizer = AutoTokenizer.from_pretrained(model_name)\n", - " tokenizer.pad_token = tokenizer.eos_token\n", - " inputs = tokenizer.apply_chat_template(prompt, return_tensors=\"pt\")\n", - " if torch.cuda.is_available():\n", - " inputs = inputs.to(\"cuda\")\n", - " streamer = TextStreamer(tokenizer)\n", - " if \"microsoft/bitnet-b1.58-2B-4T\" in model_name:\n", - " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", trust_remote_code=True)\n", - " elif \"tiiuae/Falcon-E-3B-Instruct\" in model_name:\n", - " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", torch_dtype=torch.float16 )\n", - " else:\n", - " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", quantization_config=get_quantization_config())\n", - " outputs = model.generate(inputs, max_new_tokens=output_tokens, streamer=streamer)\n", - " response = tokenizer.decode(outputs[0])\n", - " del model, inputs, tokenizer, outputs\n", - " gc.collect()\n", - " torch.cuda.empty_cache()\n", - " return response" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Gh_Ny1aM-L8z" - }, - "source": [ - "## Frontier Models Inference" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "id": "h11WlZNhfHCR" - }, - "outputs": [], - "source": [ - "# ChatGPT, Claude and Gemini response function\n", - "def get_chatgpt_response(prompt, model_name, output_tokens):\n", - " response = openai_client.chat.completions.create(\n", - " model=model_name,\n", - " messages=prompt,\n", - " max_tokens=output_tokens,\n", - " )\n", - " return response.choices[0].message.content\n", - "\n", - "def get_claude_response(prompt, model_name, output_tokens):\n", - " response = anthropic_client.messages.create(\n", - " model=model_name,\n", - " max_tokens=output_tokens,\n", - " 
system=system_prompt,\n", - " messages=[\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": prompt,\n", - " }\n", - " ],\n", - " )\n", - " return response.content[0].text\n", - "\n", - "def get_gemini_response(prompt, model_name, output_tokens):\n", - " model = ggai.GenerativeModel(\n", - " model_name=model_name,\n", - " system_instruction=system_prompt,\n", - " )\n", - "\n", - " response = model.generate_content(prompt, generation_config={\n", - " \"max_output_tokens\": output_tokens,\n", - " \"temperature\": 0.7,\n", - " })\n", - " return response.text" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nzHbM_WQvRgT" - }, - "source": [ - "## Gradio Implementation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uFWZqw1R-al_" - }, - "source": [ - "### Dropdowns Selection Lists" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "id": "rOzEb0o--aD7" - }, - "outputs": [], - "source": [ - "# Dropdown List Values for the user\n", - "MODEL_TYPES=[\"GPT\", \"Claude\", \"Gemini\", \"HuggingFace\"]\n", - "OPENAI_MODEL_NAMES=[\"gpt-4o-mini\", \"gpt-4o\", \"gpt-3.5-turbo\"]\n", - "ANTHROPIC_MODELS=[\"claude-3-7-sonnet-latest\", \"claude-3-5-haiku-latest\", \"claude-3-opus-latest\"]\n", - "GOOGLE_MODELS=[\"gemini-2.0-flash\", \"gemini-1.5-pro\"]\n", - "HUGGINGFACE_MODELS=[\n", - " \"meta-llama/Llama-3.2-3B-Instruct\",\n", - " \"microsoft/bitnet-b1.58-2B-4T\",\n", - " \"ByteDance-Seed/Seed-Coder-8B-Instruct\",\n", - " \"tiiuae/Falcon-E-3B-Instruct\",\n", - " \"Qwen/Qwen2.5-7B-Instruct\"\n", - "]\n", - "MODEL_NAMES = {\n", - " \"GPT\": OPENAI_MODEL_NAMES,\n", - " \"Claude\": ANTHROPIC_MODELS,\n", - " \"Gemini\": GOOGLE_MODELS,\n", - " \"HuggingFace\": HUGGINGFACE_MODELS\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "sbXGL8_4-oKc" - }, - "source": [ - "### UI" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "id": "_0NCY7FgCVHj" - }, - "outputs": [], - "source": [ - "with gr.Blocks() as generator_ui:\n", - " gr.Markdown(\"# 🧠 Business Scenario → Synthetic Dataset Generator\")\n", - "\n", - " with gr.Row():\n", - " with gr.Column(scale=3):\n", - " with gr.Row():\n", - " dataset_size=gr.Number(value=10, label=\"Enter the number of data samples to generate.\", show_label=True)\n", - " format=gr.Dropdown([\"json\", \"csv\", \"txt\", \"markdown\"], label=\"Select the format for the dataset\", show_label=True)\n", - " with gr.Row():\n", - " scenario=gr.Textbox(label=\"Business Scenario\", lines=5, placeholder=\"Describe your business scenario here\")\n", - " with gr.Row():\n", - " error = gr.Markdown(visible=False)\n", - " with gr.Row():\n", - " clear = gr.Button(\"Clear Everything\")\n", - " submit = gr.Button(\"Generate Dataset\", variant=\"primary\")\n", - "\n", - " with gr.Column(scale=1):\n", - " model_type = gr.Dropdown(MODEL_TYPES, label=\"Model Type\", show_label=True, info=\"Select the model type you want to use\")\n", - " model_name = gr.Dropdown(MODEL_NAMES[model_type.value], label=\"Model Name\", show_label=True, allow_custom_value=True, info=\"Select the model name or enter one manually\")\n", - " output_tokens= gr.Number(value=1000, label=\"Enter the max number of output tokens to generate.\", show_label=True, info=\"This will impact the length of the response containg the dataset\")\n", - "\n", - " with gr.Row():\n", - " # Chatbot Interface\n", - " chatbot = gr.Chatbot(\n", - " type='messages',\n", - " label='Chatbot',\n", - " show_label=True,\n", - " height=300,\n", - " 
resizable=True,\n", - " elem_id=\"chatbot\",\n", - " avatar_images=(\"🧑\", \"🤖\",)\n", - " )\n", - " with gr.Row(variant=\"compact\"):\n", - " extract_btn = gr.Button(\"Extract and Save Dataset\", variant=\"huggingface\", visible=False)\n", - " file_name = gr.Textbox(label=\"Enter file name here (without file extension)\", placeholder=\"e.g. cancer_synthetic, warehouse_synthetic (no digits)\", visible=False)\n", - " with gr.Row():\n", - " markdown_preview = gr.Markdown(visible = False)\n", - " dataset_preview = gr.Textbox(label=\"Dataset Preview\",visible=False)\n", - " with gr.Row():\n", - " file_saved = gr.Textbox(visible=False)\n", - "\n", - " def run_inference(scenario, model_type, model_name, output_tokens, dataset_size, format):\n", - " \"\"\"Run the model and get the response\"\"\"\n", - " model_type=model_type.lower()\n", - " print(f\"scenario: {scenario}\")\n", - " print(f\"model_type: {model_type}\")\n", - " print(f\"model_name: {model_name}\")\n", - " if not scenario.strip():\n", - " return gr.update(value=\"❌ **Error:** Please define a scenario first!\",visible=True), []\n", - "\n", - " user_prompt = get_user_prompt(scenario, dataset_size, format)\n", - " prompt = [\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": user_prompt},\n", - " ]\n", - "\n", - " if model_type == \"gpt\":\n", - " response = get_chatgpt_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", - " elif model_type == \"claude\":\n", - " response = get_claude_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", - " elif model_type == \"gemini\":\n", - " response = get_gemini_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", - " else:\n", - " response = run_hfmodel_and_get_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", - " torch.cuda.empty_cache()\n", - " history = [\n", - " {\"role\": \"user\", \"content\": scenario},\n", - " {\"role\": \"assistant\", \"content\": response}\n", - " ]\n", - " return gr.update(visible=False), history\n", - "\n", - " def extract_dataset_string(response):\n", - " \"\"\"Extract dataset content between defined tags using regex.\"\"\"\n", - " # Remove known artificial tokens (common in HuggingFace or Claude)\n", - " response = re.sub(r\"<\\[.*?\\]>\", \"\", response)\n", - "\n", - " # Remove system or prompt echo if repeated before dataset\n", - " response = re.sub(r\"(?is)^.*?<<<\", \"<<<\", response.strip(), count=1)\n", - "\n", - " # 1. Match strict <<<>>>...<<<>>> tag blocks (use last match)\n", - " matches = re.findall(r\"<<<>>>[\\s\\r\\n]*(.*?)[\\s\\r\\n]*<<<>>>\", response, re.DOTALL)\n", - " if matches:\n", - " return matches[-1].strip()\n", - "\n", - " # 2. Match loose <<< ... >>> format\n", - " matches = re.findall(r\"<<<[\\s\\r\\n]*(.*?)[\\s\\r\\n]*>>>\", response, re.DOTALL)\n", - " if matches:\n", - " return matches[-1].strip()\n", - "\n", - " # 3. Match final fallback: take everything after last <<< as raw data\n", - " last_open = response.rfind(\"<<<\")\n", - " if last_open != -1:\n", - " raw = response[last_open + 3 :].strip()\n", - " # Optionally cut off noisy trailing notes, explanations, etc.\n", - " raw = re.split(r\"\\n\\s*\\n|Explanation:|Note:|---\", raw)[0]\n", - " return raw.strip()\n", - "\n", - " return \"Could not extract dataset! 
Try again with a different model.\"\n", - "\n", - " def extract_dataset_from_response(chatbot_history, file_name, file_type):\n", - " \"\"\"Extract dataset and update in gradio UI components\"\"\"\n", - " response = chatbot_history[-1][\"content\"]\n", - " if not response:\n", - " return gr.update(visible=True, value=\"Could not find LLM Response! Try again.\"), gr.update(visible=False)\n", - "\n", - " # match = re.search(r'<<<\\s*(.*?)\\s*>>>', response, re.DOTALL)\n", - " # print(match)\n", - " # if match and match.group(1).strip() == \"\":\n", - " # match = re.search(r'<<<>>>\\s*(.*?)\\s*<<<>>>', response, re.DOTALL)\n", - " # print(match)\n", - " # if match is None:\n", - " # return gr.update(visible=True, value=\"Could not extract dataset! Try again with a different model.\"), gr.update(visible=False)\n", - " # dataset = match.group(1).strip()\n", - " dataset = extract_dataset_string(response)\n", - " if dataset == \"Could not extract dataset! Try again with a different model.\":\n", - " return gr.update(visible=True, value=dataset), gr.update(visible=False)\n", - " text = save_dataset(dataset, file_type, file_name)\n", - " return gr.update(visible=True, value=text), gr.update(visible=True, value=dataset)\n", - "\n", - " def save_dataset(dataset, file_format, file_name):\n", - " \"\"\"Save dataset to a file based on the selected format.\"\"\"\n", - " file_name=file_name+\".\"+file_format\n", - " print(dataset)\n", - " print(file_name)\n", - " if file_format == \"json\":\n", - " try:\n", - " data = json.loads(dataset)\n", - " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", - " json.dump(data, f, indent=4)\n", - " return \"Dataset saved successfully!\"\n", - " except:\n", - " return \"Could not save dataset! Try again in another format.\"\n", - " elif file_format == \"csv\":\n", - " try:\n", - " df = pd.read_csv(StringIO(dataset))\n", - " df.to_csv(file_name, index=False)\n", - " return \"Dataset saved successfully!\"\n", - " except:\n", - " return \"Could not save dataset! Try again in another format.\"\n", - " elif file_format == \"txt\":\n", - " try:\n", - " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", - " f.write(dataset)\n", - " return \"Dataset saved successfully!\"\n", - " except:\n", - " return \"Could not save dataset! 
Try again in another format.\"\n", - "\n", - " def clear_chat():\n", - " \"\"\"Clear the chat history.\"\"\"\n", - " return \"\", [], gr.update(visible=False), gr.update(visible=False)\n", - "\n", - " def show_extract_btn(chatbot_history, format):\n", - " \"\"\"Show the extract button if the response has been displayed in the chatbot and format is not set to markdown\"\"\"\n", - " if chatbot_history == []:\n", - " return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)\n", - " if format == \"markdown\":\n", - " return gr.update(visible=True, value=chatbot_history[1][\"content\"]), gr.update(visible=False), gr.update(visible=False)\n", - " return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)\n", - "\n", - " extract_btn.click(\n", - " fn=extract_dataset_from_response,\n", - " inputs=[chatbot, file_name, format],\n", - " outputs=[file_saved, dataset_preview]\n", - " )\n", - "\n", - " chatbot.change(\n", - " fn=show_extract_btn,\n", - " inputs=[chatbot, format],\n", - " outputs=[markdown_preview, extract_btn, file_name]\n", - " )\n", - "\n", - " model_type.change(\n", - " fn=lambda x: gr.update(choices=MODEL_NAMES[x], value=MODEL_NAMES[x][0]),\n", - " inputs=[model_type],\n", - " outputs=[model_name]\n", - " )\n", - "\n", - " submit.click(\n", - " fn=run_inference,\n", - " inputs=[scenario, model_type, model_name, output_tokens, dataset_size, format],\n", - " outputs=[error, chatbot],\n", - " show_progress=True\n", - " )\n", - "\n", - " clear.click(\n", - " clear_chat,\n", - " outputs=[scenario, chatbot, dataset_preview, file_saved]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "collapsed": true, - "id": "kzDUJahK8uRN", - "outputId": "c5674be2-b262-4439-ae91-4f3e1f49e041" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n", - "* Running on public URL: https://d076a9fef9034a4f24.gradio.live\n", - "\n", - "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" - ] - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "
" - ] - }, - "metadata": {} - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "scenario: Generate a dataset for training a model to approve/reject loan applications. Include features like loan amount, applicant income, co-applicant income, employment type, credit history (binary), loan term, number of dependents, education level, and loan approval status.\n", - "model_type: gpt\n", - "model_name: gpt-4o\n", - "Loan Amount,Applicant Income,Co-applicant Income,Employment Type,Credit History,Loan Term,Number of Dependents,Education Level,Loan Approval Status\n", - "250000,60000,15000,Salaried,1,240,1,Graduate,Approved\n", - "350000,80000,0,Salaried,1,360,2,Graduate,Approved\n", - "120000,30000,10000,Self-employed,0,180,1,Not Graduate,Rejected\n", - "500000,150000,50000,Self-employed,1,300,3,Graduate,Approved\n", - "75000,20000,0,Unemployed,0,120,0,Graduate,Rejected\n", - "275000,75000,25000,Salaried,0,240,2,Not Graduate,Rejected\n", - "100000,40000,20000,Salaried,1,60,0,Graduate,Approved\n", - "310000,95000,0,Self-employed,1,360,1,Graduate,Approved\n", - "450000,50000,0,Self-employed,0,180,4,Not Graduate,Rejected\n", - "200000,55000,20000,Salaried,1,120,3,Graduate,Approved\n", - "100000,35000,0,Unemployed,0,60,0,Not Graduate,Rejected\n", - "230000,68000,13000,Salaried,1,240,1,Graduate,Approved\n", - "330000,99000,40000,Self-employed,1,300,2,Graduate,Approved\n", - "150000,18000,7500,Unemployed,0,48,0,Not Graduate,Rejected\n", - "210000,64000,0,Salaried,0,120,1,Graduate,Rejected\n", - "310000,87000,30000,Self-employed,1,360,2,Graduate,Approved\n", - "50000,22000,7000,Unemployed,0,24,0,Not Graduate,Rejected\n", - "290000,92000,20000,Salaried,1,240,3,Graduate,Approved\n", - "110000,45000,0,Salaried,0,36,0,Graduate,Rejected\n", - "450000,76000,25000,Self-employed,1,360,2,Graduate,Approved\n", - "loan_approval_synthetic.txt\n", - "scenario: Generate a dataset for predicting medical appointment no-shows. Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", - "model_type: gpt\n", - "model_name: gpt-4o\n", - "scenario: Generate a dataset for predicting medical appointment no-shows. 
Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", - "model_type: gpt\n", - "model_name: gpt-4o\n", - "[\n", - " {\n", - " \"appointment_id\": \"AID001\",\n", - " \"scheduled_date\": \"2023-11-01\",\n", - " \"appointment_date\": \"2023-11-10\",\n", - " \"lead_time\": 9,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 45,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID002\",\n", - " \"scheduled_date\": \"2023-11-03\",\n", - " \"appointment_date\": \"2023-11-15\",\n", - " \"lead_time\": 12,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 34,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID003\",\n", - " \"scheduled_date\": \"2023-11-05\",\n", - " \"appointment_date\": \"2023-11-11\",\n", - " \"lead_time\": 6,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 29,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID004\",\n", - " \"scheduled_date\": \"2023-11-02\",\n", - " \"appointment_date\": \"2023-11-14\",\n", - " \"lead_time\": 12,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 62,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID005\",\n", - " \"scheduled_date\": \"2023-11-06\",\n", - " \"appointment_date\": \"2023-11-13\",\n", - " \"lead_time\": 7,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 21,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID006\",\n", - " \"scheduled_date\": \"2023-11-08\",\n", - " \"appointment_date\": \"2023-11-17\",\n", - " \"lead_time\": 9,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 58,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID007\",\n", - " \"scheduled_date\": \"2023-11-10\",\n", - " \"appointment_date\": \"2023-11-18\",\n", - " \"lead_time\": 8,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 41,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID008\",\n", - " \"scheduled_date\": \"2023-11-07\",\n", - " \"appointment_date\": \"2023-11-12\",\n", - " \"lead_time\": 5,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 67,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID009\",\n", - " \"scheduled_date\": \"2023-11-12\",\n", - " \"appointment_date\": \"2023-11-20\",\n", - " \"lead_time\": 8,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 74,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID010\",\n", - " \"scheduled_date\": \"2023-11-09\",\n", - " \"appointment_date\": \"2023-11-16\",\n", - " \"lead_time\": 
7,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 25,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID011\",\n", - " \"scheduled_date\": \"2023-11-13\",\n", - " \"appointment_date\": \"2023-11-21\",\n", - " \"lead_time\": 8,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 32,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID012\",\n", - " \"scheduled_date\": \"2023-11-14\",\n", - " \"appointment_date\": \"2023-11-25\",\n", - " \"lead_time\": 11,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 48,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID013\",\n", - " \"scheduled_date\": \"2023-11-15\",\n", - " \"appointment_date\": \"2023-11-27\",\n", - " \"lead_time\": 12,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 36,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID014\",\n", - " \"scheduled_date\": \"2023-11-17\",\n", - " \"appointment_date\": \"2023-12-02\",\n", - " \"lead_time\": 15,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 28,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID015\",\n", - " \"scheduled_date\": \"2023-11-16\",\n", - " \"appointment_date\": \"2023-12-01\",\n", - " \"lead_time\": 15,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 60,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID016\",\n", - " \"scheduled_date\": \"2023-11-18\",\n", - " \"appointment_date\": \"2023-12-05\",\n", - " \"lead_time\": 17,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 40,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID017\",\n", - " \"scheduled_date\": \"2023-11-19\",\n", - " \"appointment_date\": \"2023-12-03\",\n", - " \"lead_time\": 14,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 19,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID018\",\n", - " \"scheduled_date\": \"2023-11-21\",\n", - " \"appointment_date\": \"2023-12-07\",\n", - " \"lead_time\": 16,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 51,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID019\",\n", - " \"scheduled_date\": \"2023-11-23\",\n", - " \"appointment_date\": \"2023-12-09\",\n", - " \"lead_time\": 16,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 55,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID020\",\n", - " \"scheduled_date\": \"2023-11-22\",\n", - " \"appointment_date\": \"2023-12-08\",\n", - " \"lead_time\": 16,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 23,\n", 
- " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID021\",\n", - " \"scheduled_date\": \"2023-11-24\",\n", - " \"appointment_date\": \"2023-12-10\",\n", - " \"lead_time\": 16,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 47,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID022\",\n", - " \"scheduled_date\": \"2023-11-25\",\n", - " \"appointment_date\": \"2023-12-12\",\n", - " \"lead_time\": 17,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 33,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID023\",\n", - " \"scheduled_date\": \"2023-11-27\",\n", - " \"appointment_date\": \"2023-12-14\",\n", - " \"lead_time\": 17,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 42,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID024\",\n", - " \"scheduled_date\": \"2023-11-29\",\n", - " \"appointment_date\": \"2023-12-15\",\n", - " \"lead_time\": 16,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 64,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID025\",\n", - " \"scheduled_date\": \"2023-12-01\",\n", - " \"appointment_date\": \"2023-12-20\",\n", - " \"lead_time\": 19,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 26,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID026\",\n", - " \"scheduled_date\": \"2023-12-03\",\n", - " \"appointment_date\": \"2023-12-22\",\n", - " \"lead_time\": 19,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 31,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID027\",\n", - " \"scheduled_date\": \"2023-12-05\",\n", - " \"appointment_date\": \"2023-12-24\",\n", - " \"lead_time\": 19,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 50,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID028\",\n", - " \"scheduled_date\": \"2023-12-06\",\n", - " \"appointment_date\": \"2023-12-25\",\n", - " \"lead_time\": 19,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 39,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID029\",\n", - " \"scheduled_date\": \"2023-12-07\",\n", - " \"appointment_date\": \"2023-12-27\",\n", - " \"lead_time\": 20,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 71,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID030\",\n", - " \"scheduled_date\": \"2023-12-08\",\n", - " \"appointment_date\": \"2023-12-28\",\n", - " \"lead_time\": 20,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 44,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 
5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID031\",\n", - " \"scheduled_date\": \"2023-12-10\",\n", - " \"appointment_date\": \"2023-12-31\",\n", - " \"lead_time\": 21,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 38,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID032\",\n", - " \"scheduled_date\": \"2023-12-11\",\n", - " \"appointment_date\": \"2024-01-02\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 53,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID033\",\n", - " \"scheduled_date\": \"2023-12-13\",\n", - " \"appointment_date\": \"2024-01-04\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 27,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID034\",\n", - " \"scheduled_date\": \"2023-12-15\",\n", - " \"appointment_date\": \"2024-01-06\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 46,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID035\",\n", - " \"scheduled_date\": \"2023-12-17\",\n", - " \"appointment_date\": \"2024-01-09\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 68,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID036\",\n", - " \"scheduled_date\": \"2023-12-19\",\n", - " \"appointment_date\": \"2024-01-10\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 37,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID037\",\n", - " \"scheduled_date\": \"2023-12-20\",\n", - " \"appointment_date\": \"2024-01-12\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 57,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID038\",\n", - " \"scheduled_date\": \"2023-12-22\",\n", - " \"appointment_date\": \"2024-01-14\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 43,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID039\",\n", - " \"scheduled_date\": \"2023-12-23\",\n", - " \"appointment_date\": \"2024-01-16\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 65,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID040\",\n", - " \"scheduled_date\": \"2023-12-25\",\n", - " \"appointment_date\": \"2024-01-17\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 49,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " 
\"appointment_id\": \"AID041\",\n", - " \"scheduled_date\": \"2023-12-27\",\n", - " \"appointment_date\": \"2024-01-20\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 30,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID042\",\n", - " \"scheduled_date\": \"2023-12-29\",\n", - " \"appointment_date\": \"2024-01-22\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 24,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID043\",\n", - " \"scheduled_date\": \"2024-01-01\",\n", - " \"appointment_date\": \"2024-01-25\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 72,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID044\",\n", - " \"scheduled_date\": \"2024-01-03\",\n", - " \"appointment_date\": \"2024-01-27\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 35,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID045\",\n", - " \"scheduled_date\": \"2024-01-04\",\n", - " \"appointment_date\": \"2024-01-28\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 61,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID046\",\n", - " \"scheduled_date\": \"2024-01-05\",\n", - " \"appointment_date\": \"2024-01-30\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 68,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID047\",\n", - " \"scheduled_date\": \"2024-01-07\",\n", - " \"appointment_date\": \"2024-02-01\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 22,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID048\",\n", - " \"scheduled_date\": \"2024-01-08\",\n", - " \"appointment_date\": \"2024-02-03\",\n", - " \"lead_time\": 26,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 52,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID049\",\n", - " \"scheduled_date\": \"2024-01-10\",\n", - " \"appointment_date\": \"2024-02-04\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 73,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID050\",\n", - " \"scheduled_date\": \"2024-01-12\",\n", - " \"appointment_date\": \"2024-02-06\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 56,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID051\",\n", - " \"scheduled_date\": 
\"2024-01-15\",\n", - " \"appointment_date\": \"2024-02-07\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 62,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID052\",\n", - " \"scheduled_date\": \"2024-01-17\",\n", - " \"appointment_date\": \"2024-02-10\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 80,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID053\",\n", - " \"scheduled_date\": \"2024-01-19\",\n", - " \"appointment_date\": \"2024-02-12\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 29,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID054\",\n", - " \"scheduled_date\": \"2024-01-21\",\n", - " \"appointment_date\": \"2024-02-13\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 66,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID055\",\n", - " \"scheduled_date\": \"2024-01-23\",\n", - " \"appointment_date\": \"2024-02-15\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 77,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID056\",\n", - " \"scheduled_date\": \"2024-01-25\",\n", - " \"appointment_date\": \"2024-02-17\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 54,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID057\",\n", - " \"scheduled_date\": \"2024-01-28\",\n", - " \"appointment_date\": \"2024-02-19\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 28,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID058\",\n", - " \"scheduled_date\": \"2024-01-30\",\n", - " \"appointment_date\": \"2024-02-22\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 45,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID059\",\n", - " \"scheduled_date\": \"2024-02-01\",\n", - " \"appointment_date\": \"2024-02-24\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 69,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID060\",\n", - " \"scheduled_date\": \"2024-02-02\",\n", - " \"appointment_date\": \"2024-02-26\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 51,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID061\",\n", - " \"scheduled_date\": \"2024-02-04\",\n", - " \"appointment_date\": \"2024-02-27\",\n", - " 
\"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 33,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID062\",\n", - " \"scheduled_date\": \"2024-02-06\",\n", - " \"appointment_date\": \"2024-03-01\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 84,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID063\",\n", - " \"scheduled_date\": \"2024-02-09\",\n", - " \"appointment_date\": \"2024-03-04\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 47,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID064\",\n", - " \"scheduled_date\": \"2024-02-10\",\n", - " \"appointment_date\": \"2024-03-06\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 59,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID065\",\n", - " \"scheduled_date\": \"2024-02-12\",\n", - " \"appointment_date\": \"2024-03-08\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 20,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID066\",\n", - " \"scheduled_date\": \"2024-02-14\",\n", - " \"appointment_date\": \"2024-03-10\",\n", - " \"lead_time\": 25,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 48,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID067\",\n", - " \"scheduled_date\": \"2024-02-17\",\n", - " \"appointment_date\": \"2024-03-12\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 38,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID068\",\n", - " \"scheduled_date\": \"2024-02-19\",\n", - " \"appointment_date\": \"2024-03-14\",\n", - " \"lead_time\": 24,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 76,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID069\",\n", - " \"scheduled_date\": \"2024-02-21\",\n", - " \"appointment_date\": \"2024-03-15\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 34,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID070\",\n", - " \"scheduled_date\": \"2024-02-23\",\n", - " \"appointment_date\": \"2024-03-17\",\n", - " \"lead_time\": 23,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 26,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID071\",\n", - " \"scheduled_date\": \"2024-02-25\",\n", - " \"appointment_date\": \"2024-03-19\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 2,\n", - " 
\"patient_age\": 22,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 2,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID072\",\n", - " \"scheduled_date\": \"2024-02-27\",\n", - " \"appointment_date\": \"2024-03-20\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 0,\n", - " \"patient_age\": 58,\n", - " \"gender\": \"Other\",\n", - " \"health_condition_severity\": 1,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID073\",\n", - " \"scheduled_date\": \"2024-02-29\",\n", - " \"appointment_date\": \"2024-03-22\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 3,\n", - " \"patient_age\": 67,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 3,\n", - " \"no_show_status\": false\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID074\",\n", - " \"scheduled_date\": \"2024-03-02\",\n", - " \"appointment_date\": \"2024-03-24\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 2,\n", - " \"patient_age\": 32,\n", - " \"gender\": \"Female\",\n", - " \"health_condition_severity\": 4,\n", - " \"no_show_status\": true\n", - " },\n", - " {\n", - " \"appointment_id\": \"AID075\",\n", - " \"scheduled_date\": \"2024-03-04\",\n", - " \"appointment_date\": \"2024-03-26\",\n", - " \"lead_time\": 22,\n", - " \"sms_reminders_sent\": 1,\n", - " \"patient_age\": 46,\n", - " \"gender\": \"Male\",\n", - " \"health_condition_severity\": 5,\n", - " \"no_show_status\": false\n", - " }\n", - "]\n", - "medical_appointment.json\n", - "scenario: Create a dataset of credit card transactions for detecting fraud. Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", - "model_type: claude\n", - "model_name: claude-3-7-sonnet-latest\n", - "scenario: Create a dataset of credit card transactions for detecting fraud. 
Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", - "model_type: claude\n", - "model_name: claude-3-7-sonnet-latest\n", - "transaction_id,amount,timestamp,merchant_category,customer_location,card_presence,device_type,fraud_label\n", - "TX123456789,45.99,2023-11-01 08:23:15,Retail,New York,Yes,POS Terminal,No\n", - "TX123456790,899.50,2023-11-01 09:45:22,Electronics,Chicago,Yes,POS Terminal,No\n", - "TX123456791,12.35,2023-11-01 10:12:45,Food & Beverage,Los Angeles,No,Mobile,No\n", - "TX123456792,5423.80,2023-11-01 11:30:18,Jewelry,Miami,No,Web Browser,Yes\n", - "TX123456793,76.24,2023-11-01 14:22:56,Groceries,Denver,Yes,POS Terminal,No\n", - "TX123456794,149.99,2023-11-02 07:15:33,Clothing,Seattle,No,Mobile,No\n", - "TX123456795,2500.00,2023-11-02 08:45:12,Electronics,Toronto,No,Web Browser,Yes\n", - "TX123456796,35.50,2023-11-02 12:33:47,Food & Beverage,Boston,Yes,POS Terminal,No\n", - "TX123456797,10.99,2023-11-02 15:20:09,Entertainment,Philadelphia,No,Mobile,No\n", - "TX123456798,750.25,2023-11-02 16:45:18,Travel,San Francisco,No,Web Browser,No\n", - "TX123456799,65.40,2023-11-02 19:22:31,Retail,Austin,Yes,POS Terminal,No\n", - "TX123456800,3299.99,2023-11-03 05:45:22,Electronics,London,No,Web Browser,Yes\n", - "TX123456801,22.50,2023-11-03 08:12:40,Food & Beverage,Atlanta,Yes,POS Terminal,No\n", - "TX123456802,129.95,2023-11-03 10:33:27,Clothing,Chicago,No,Mobile,No\n", - "TX123456803,50.00,2023-11-03 12:15:39,Gas Station,Dallas,Yes,POS Terminal,No\n", - "TX123456804,1999.00,2023-11-03 14:30:45,Electronics,Singapore,No,Web Browser,No\n", - "TX123456805,8.75,2023-11-03 18:22:14,Food & Beverage,Montreal,No,Mobile,No\n", - "TX123456806,459.99,2023-11-04 09:15:33,Home Goods,Houston,Yes,POS Terminal,No\n", - "TX123456807,2750.00,2023-11-04 10:45:28,Travel,Paris,No,Web Browser,Yes\n", - "TX123456808,85.00,2023-11-04 11:33:52,Healthcare,New York,Yes,POS Terminal,No\n", - "TX123456809,17.25,2023-11-04 13:10:44,Food & Beverage,Los Angeles,No,Mobile,No\n", - "TX123456810,150.49,2023-11-04 15:22:18,Entertainment,Miami,No,Mobile,No\n", - "TX123456811,4500.00,2023-11-04 19:45:02,Jewelry,Dubai,No,Web Browser,Yes\n", - "TX123456812,27.99,2023-11-05 08:33:27,Groceries,Seattle,Yes,POS Terminal,No\n", - "TX123456813,1250.00,2023-11-05 10:15:42,Electronics,Tokyo,No,Web Browser,No\n", - "TX123456814,56.75,2023-11-05 12:20:35,Clothing,San Diego,No,Mobile,No\n", - "TX123456815,18.50,2023-11-05 14:30:19,Food & Beverage,Denver,Yes,POS Terminal,No\n", - "TX123456816,3750.25,2023-11-05 16:45:08,Travel,Sydney,No,Web Browser,Yes\n", - "TX123456817,95.00,2023-11-05 18:22:56,Healthcare,Boston,No,Mobile,No\n", - "TX123456818,2345.67,2023-11-05 20:15:33,Electronics,Berlin,No,Web Browser,Yes\n", - "fraud_transactions.csv\n", - "scenario: Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", - "model_type: gemini\n", - "model_name: gemini-1.5-pro\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "WARNING:tornado.access:429 POST /v1beta/models/gemini-1.5-pro:generateContent?%24alt=json%3Benum-encoding%3Dint (127.0.0.1) 409.67ms\n", - "Traceback (most recent call last):\n", - " File \"/usr/local/lib/python3.11/dist-packages/gradio/queueing.py\", line 625, in process_events\n", - " response = await route_utils.call_process_api(\n", 
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/gradio/route_utils.py\", line 322, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2181, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 1692, in call_function\n", - " prediction = await anyio.to_thread.run_sync( # type: ignore\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/anyio/to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/anyio/_backends/_asyncio.py\", line 2470, in run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/anyio/_backends/_asyncio.py\", line 967, in run\n", - " result = context.run(func, *args)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/gradio/utils.py\", line 889, in wrapper\n", - " response = f(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^\n", - " File \"\", line 62, in run_inference\n", - " response = get_gemini_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"\", line 30, in get_gemini_response\n", - " response = model.generate_content(prompt, generation_config={\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n", - " response = self._client.generate_content(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/ai/generativelanguage_v1beta/services/generative_service/client.py\", line 835, in generate_content\n", - " response = rpc(\n", - " ^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/gapic_v1/method.py\", line 131, in __call__\n", - " return wrapped_func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 293, in retry_wrapped_func\n", - " return retry_target(\n", - " ^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 153, in retry_target\n", - " _retry_error_helper(\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_base.py\", line 212, in _retry_error_helper\n", - " raise final_exc from source_exc\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/retry/retry_unary.py\", line 144, in retry_target\n", - " result = target()\n", - " ^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/timeout.py\", line 130, in func_with_timeout\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/usr/local/lib/python3.11/dist-packages/google/api_core/grpc_helpers.py\", line 76, in error_remapped_callable\n", - " return callable_(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File 
\"/usr/local/lib/python3.11/dist-packages/google/ai/generativelanguage_v1beta/services/generative_service/transports/rest.py\", line 1161, in __call__\n", - " raise core_exceptions.from_http_response(response)\n", - "google.api_core.exceptions.TooManyRequests: 429 POST https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?%24alt=json%3Benum-encoding%3Dint: You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits.\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "scenario: Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", - "model_type: gemini\n", - "model_name: gemini-2.0-flash\n", - "CustomerID,PortfolioValue,Age,IncomeBracket,RiskAppetite,TransactionsPerMonth,PreferredInvestmentType,RiskScore\n", - "1,75000.00,32,Medium,High,8,\"Stocks, Options\",78\n", - "2,120000.50,45,High,Medium,3,\"Bonds, Mutual Funds\",55\n", - "3,30000.75,28,Low,Low,1,\"Bonds\",25\n", - "4,250000.00,58,High,High,12,\"Stocks, Real Estate\",85\n", - "5,80000.25,39,Medium,Medium,5,\"Mutual Funds\",60\n", - "6,150000.00,48,High,Low,2,\"Bonds, ETFs\",40\n", - "7,45000.50,25,Low,Medium,4,\"Stocks\",50\n", - "8,300000.75,62,High,High,15,\"Stocks, Options, Real Estate\",92\n", - "9,90000.00,35,Medium,Medium,6,\"ETFs, Mutual Funds\",65\n", - "10,180000.25,50,High,Low,1,\"Bonds\",35\n", - "11,60000.50,29,Low,Low,2,\"Bonds, ETFs\",30\n", - "12,400000.00,65,High,High,18,\"Stocks, Options, Cryptocurrency\",95\n", - "13,100000.75,42,Medium,Medium,7,\"Mutual Funds, Real Estate\",70\n", - "14,200000.00,55,High,Low,0,\"Bonds, Annuities\",20\n", - "15,70000.25,31,Low,Medium,3,\"Stocks, ETFs\",58\n", - "16,130000.50,47,High,Medium,4,\"Bonds, Mutual Funds\",52\n", - "17,35000.75,27,Low,Low,1,\"Bonds\",28\n", - "18,280000.00,60,High,High,14,\"Stocks, Real Estate\",88\n", - "19,85000.25,37,Medium,Medium,5,\"ETFs\",63\n", - "20,160000.00,52,High,Low,2,\"Bonds, CDs\",38\n", - "21,50000.50,26,Low,Low,1,\"Bonds, Government Securities\",22\n", - "22,450000.75,68,High,High,20,\"Stocks, Options, Venture Capital\",97\n", - "23,110000.00,44,Medium,Medium,8,\"Mutual Funds, ETFs\",73\n", - "24,220000.25,57,High,Low,0,\"Bonds, Treasury Bills\",18\n", - "25,72000.50,33,Low,Medium,4,\"Stocks\",56\n", - "26,140000.00,49,High,Medium,3,\"Bonds, Mutual Funds\",54\n", - "27,32000.75,29,Low,Low,1,\"Bonds\",26\n", - "28,260000.00,61,High,High,13,\"Stocks, Real Estate\",86\n", - "29,82000.25,38,Medium,Medium,6,\"ETFs, Index Funds\",61\n", - "30,170000.50,53,High,Low,2,\"Bonds\",36\n", - "31,55000.75,24,Low,Low,2,\"Bonds, Money Market Accounts\",24\n", - "32,350000.00,64,High,High,17,\"Stocks, Options, Commodities\",93\n", - "33,95000.25,41,Medium,Medium,7,\"Mutual Funds, REITs\",68\n", - "34,190000.50,56,High,Low,0,\"Bonds, Fixed Income\",19\n", - "35,65000.00,30,Low,Medium,3,\"Stocks, Small Cap Stocks\",59\n", - "36,125000.75,46,High,Medium,4,\"Bonds, Large Cap Funds\",51\n", - "37,33000.25,28,Low,Low,1,\"Bonds\",27\n", - "38,270000.50,59,High,High,14,\"Stocks, Emerging Markets\",87\n", - "39,88000.00,36,Medium,Medium,5,\"ETFs, Balanced Funds\",64\n", - "40,155000.75,51,High,Low,2,\"Bonds, Corporate Bonds\",37\n", - "41,48000.25,25,Low,Low,1,\"Bonds, Municipal Bonds\",21\n", - "42,420000.00,67,High,High,19,\"Stocks, 
Options, Derivatives\",96\n", - "43,105000.75,43,Medium,Medium,8,\"Mutual Funds, Sector Funds\",71\n", - "44,210000.00,54,High,Low,0,\"Bonds, Government Bonds\",17\n", - "45,71000.25,32,Low,Medium,4,\"Stocks\",57\n", - "46,135000.50,48,High,Medium,3,\"Bonds, Index Funds\",53\n", - "47,34000.75,27,Low,Low,1,\"Bonds\",29\n", - "48,290000.00,63,High,High,16,\"Stocks, Real Estate, Private Equity\",90\n", - "49,89000.25,40,Medium,Medium,6,\"ETFs\",62\n", - "50,175000.50,50,High,Low,2,\"Bonds, Preferred Stocks\",39\n", - "investment_customers.csv\n", - "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", - "model_type: gemini\n", - "model_name: gemini-2.0-flash\n", - "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", - "model_type: gemini\n", - "model_name: gemini-2.0-flash\n", - "\n", - "testinggemini.json\n", - "scenario: Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", - "model_type: gemini\n", - "model_name: gemini-2.0-flash\n", - "CustomerID,MonthlyCharges,ContractType,Tenure,SupportCalls,InternetUsage,PaymentMethod,Churned\n", - "TEL2847592374,67.55,Month-to-Month,9,3,145.2,Electronic Check,Yes\n", - "TEL9283746510,92.30,One Year,48,1,87.9,Credit Card,No\n", - "TEL1837465921,25.00,Month-to-Month,2,0,25.6,Mailed Check,Yes\n", - "TEL7364582910,115.75,Two Year,65,2,203.4,Bank Transfer,No\n", - "TEL5928374615,48.20,Month-to-Month,15,4,98.7,Electronic Check,Yes\n", - "TEL3847592016,78.90,One Year,36,1,167.1,Credit Card,No\n", - "TEL8273645910,31.50,Month-to-Month,3,0,30.2,Mailed Check,Yes\n", - "TEL6354789210,102.40,Two Year,70,3,185.9,Bank Transfer,No\n", - "TEL4738291056,55.85,Month-to-Month,11,2,112.5,Electronic Check,Yes\n", - "TEL1928374650,85.60,One Year,42,1,76.3,Credit Card,No\n", - "TEL7463529108,28.75,Month-to-Month,5,0,28.9,Mailed Check,Yes\n", - "TEL5293847610,110.30,Two Year,68,2,192.7,Bank Transfer,No\n", - "TEL3647582910,62.10,Month-to-Month,13,3,134.8,Electronic Check,Yes\n", - "TEL9182736450,98.45,One Year,39,1,91.5,Credit Card,No\n", - "TEL2736458109,34.90,Month-to-Month,7,0,33.6,Mailed Check,Yes\n", - "TEL8547392016,107.60,Two Year,62,2,179.3,Bank Transfer,No\n", - "TEL6192837450,59.35,Month-to-Month,10,3,123.4,Electronic Check,Yes\n", - "TEL4928374651,82.90,One Year,45,1,82.1,Credit Card,No\n", - "TEL1635294810,22.50,Month-to-Month,4,0,22.3,Mailed Check,Yes\n", - "TEL7283746509,118.20,Two Year,71,2,210.5,Bank Transfer,No\n", - "TEL5829374610,69.70,Month-to-Month,12,3,156.9,Electronic Check,Yes\n", - "TEL3918273640,95.15,One Year,40,1,89.7,Credit Card,No\n", - "TEL9374628105,37.40,Month-to-Month,6,0,36.2,Mailed Check,Yes\n", - "TEL6458293710,104.90,Two Year,67,2,188.1,Bank Transfer,No\n", - "TEL4829374615,57.10,Month-to-Month,14,3,118.2,Electronic Check,Yes\n", - "TEL1536472910,80.55,One Year,43,1,78.9,Credit Card,No\n", - "TEL7192837465,25.30,Month-to-Month,2,0,25.9,Mailed Check,Yes\n", - 
"TEL5374829106,112.90,Two Year,69,2,195.3,Bank Transfer,No\n", - "TEL3746582910,64.85,Month-to-Month,8,3,140.6,Electronic Check,Yes\n", - "TEL9263548107,90.20,One Year,46,1,85.5,Credit Card,No\n", - "TEL2635478109,32.65,Month-to-Month,4,0,31.4,Mailed Check,Yes\n", - "TEL8473920165,109.70,Two Year,63,2,182.5,Bank Transfer,No\n", - "TEL6283749105,54.50,Month-to-Month,16,3,110.1,Electronic Check,Yes\n", - "TEL4192837460,77.30,One Year,41,1,75.2,Credit Card,No\n", - "TEL1746352910,29.90,Month-to-Month,5,0,29.6,Mailed Check,Yes\n", - "TEL7382910564,117.10,Two Year,72,2,207.9,Bank Transfer,No\n", - "TEL5928374610,72.00,Month-to-Month,13,3,159.7,Electronic Check,Yes\n", - "TEL3847592016,97.85,One Year,38,1,93.2,Credit Card,No\n", - "TEL9182736450,39.55,Month-to-Month,7,0,38.3,Mailed Check,Yes\n", - "TEL6354789210,106.30,Two Year,66,2,190.8,Bank Transfer,No\n", - "TEL4738291056,51.75,Month-to-Month,11,3,105.9,Electronic Check,Yes\n", - "TEL1928374650,74.60,One Year,44,1,73.1,Credit Card,No\n", - "TEL7463529108,27.10,Month-to-Month,3,0,26.7,Mailed Check,Yes\n", - "TEL5293847610,114.50,Two Year,70,2,198.6,Bank Transfer,No\n", - "TEL3647582910,66.45,Month-to-Month,12,3,138.5,Electronic Check,Yes\n", - "TEL9182736450,93.50,One Year,47,1,84.2,Credit Card,No\n", - "TEL2736458109,35.15,Month-to-Month,6,0,34.9,Mailed Check,Yes\n", - "TEL8547392016,103.80,Two Year,64,2,176.1,Bank Transfer,No\n", - "TEL6192837450,58.20,Month-to-Month,14,3,120.7,Electronic Check,Yes\n", - "TEL4928374651,81.65,One Year,41,1,80.5,Credit Card,No\n", - "TEL1635294810,23.70,Month-to-Month,5,0,23.4,Mailed Check,Yes\n", - "TEL7283746509,119.90,Two Year,68,2,213.2,Bank Transfer,No\n", - "TEL5829374610,70.85,Month-to-Month,9,3,153.7,Electronic Check,Yes\n", - "TEL3918273640,96.20,One Year,45,1,92.4,Credit Card,No\n", - "TEL9374628105,36.80,Month-to-Month,7,0,35.6,Mailed Check,Yes\n", - "TEL6458293710,105.50,Two Year,69,2,185.4,Bank Transfer,No\n", - "TEL4829374615,56.30,Month-to-Month,15,3,115.1,Electronic Check,Yes\n", - "TEL1536472910,79.40,One Year,42,1,77.8,Credit Card,No\n", - "TEL7192837465,24.50,Month-to-Month,4,0,24.2,Mailed Check,Yes\n", - "TEL5374829106,111.80,Two Year,67,2,193.9,Bank Transfer,No\n", - "TEL3746582910,63.70,Month-to-Month,10,3,137.4,Electronic Check,Yes\n", - "TEL9263548107,89.10,One Year,40,1,83.9,Credit Card,No\n", - "TEL2635478109,33.85,Month-to-Month,6,0,32.5,Mailed Check,Yes\n", - "TEL8473920165,108.60,Two Year,65,2,179.9,Bank Transfer,No\n", - "TEL6283749105,53.40,Month-to-Month,11,3,107.8,Electronic Check,Yes\n", - "TEL4192837460,76.20,One Year,43,1,74.1,Credit Card,No\n", - "TEL1746352910,30.50,Month-to-Month,5,0,30.2,Mailed Check,Yes\n", - "TEL7382910564,116.00,Two Year,71,2,205.3,Bank Transfer,No\n", - "TEL5928374610,71.15,Month-to-Month,16,3,157.6,Electronic Check,Yes\n", - "TEL3847592016,97.00,One Year,39,1,90.9,Credit Card,No\n", - "TEL9182736450,38.70,Month-to-Month,3,0,37.4,Mailed Check,Yes\n", - "TEL6354789210,105.20,Two Year,68,2,188.7,Bank Transfer,No\n", - "TEL4738291056,52.55,Month-to-Month,14,3,104.2,Electronic Check,Yes\n", - "TEL1928374650,75.40,One Year,46,1,72.4,Credit Card,No\n", - "TEL7463529108,26.30,Month-to-Month,2,0,26.0,Mailed Check,Yes\n", - "TEL5293847610,113.70,Two Year,66,2,196.8,Bank Transfer,No\n", - "TEL3647582910,65.60,Month-to-Month,15,3,139.1,Electronic Check,Yes\n", - "TEL9182736450,94.35,One Year,42,1,86.8,Credit Card,No\n", - "TEL2736458109,34.30,Month-to-Month,4,0,34.0,Mailed Check,Yes\n", - "TEL8547392016,102.70,Two Year,63,2,173.5,Bank Transfer,No\n", - 
"TEL6192837450,59.90,Month-to-Month,13,3,121.3,Electronic Check,Yes\n", - "TEL4928374651,82.20,One Year,47,1,79.2,Credit Card,No\n", - "TEL1635294810,23.10,Month-to-Month,6,0,22.8,Mailed Check,Yes\n", - "TEL7283746509,119.30,Two Year,69,2,211.6,Bank Transfer,No\n", - "TEL5829374610,71.40,Month-to-Month,10,3,154.3,Electronic Check,Yes\n", - "TEL3918273640,96.70,One Year,44,1,91.7,Credit Card,No\n", - "TEL9374628105,37.10,Month-to-Month,5,0,36.8,Mailed Check,Yes\n", - "TEL6458293710,106.00,Two Year,70,2,186.1,Bank Transfer,No\n", - "TEL4829374615,55.70,Month-to-Month,12,3,112.0,Electronic Check,Yes\n", - "TEL1536472910,78.80,One Year,41,1,76.5,Credit Card,No\n", - "TEL7192837465,25.00,Month-to-Month,7,0,24.7,Mailed Check,Yes\n", - "TEL5374829106,111.20,Two Year,64,2,191.3,Bank Transfer,No\n", - "TEL3746582910,64.20,Month-to-Month,14,3,136.1,Electronic Check,Yes\n", - "TEL9263548107,90.80,One Year,43,1,82.6,Credit Card,No\n", - "TEL2635478109,33.20,Month-to-Month,5,0,31.9,Mailed Check,Yes\n", - "TEL8473920165,109.10,Two Year,67,2,177.4,Bank Transfer,No\n", - "TEL6283749105,54.00,Month-to-Month,16,3,109.4,Electronic Check,Yes\n", - "TEL4192837460,75.60,One Year,40,1,73.4,Credit Card,No\n", - "TEL1746352910,31.10,Month-to-Month,3,0,30.8,Mailed Check,Yes\n", - "TEL7382910564,115.40,Two Year,65,2,202.7,Bank Transfer,No\n", - "testinggemini.txt\n", - "Keyboard interruption in main thread... closing server.\n", - "Killing tunnel 127.0.0.1:7860 <> https://d076a9fef9034a4f24.gradio.live\n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [] - }, - "metadata": {}, - "execution_count": 11 - } - ], - "source": [ - "# Example Scenarios\n", - "\n", - "# Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", - "# Generate a dataset for training a model to approve/reject loan applications. Include features like loan amount, applicant income, co-applicant income, employment type, credit history (binary), loan term, number of dependents, education level, and loan approval status.\n", - "# Create a dataset of credit card transactions for detecting fraud. Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", - "# Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", - "# Create a dataset of hospitalized patients to predict readmission within 30 days. Include patient ID, age, gender, number of prior admissions, diagnosis codes, length of stay, discharge type, medications prescribed, and readmission label.\n", - "# Generate a dataset for predicting medical appointment no-shows. 
Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", - "\n", - "generator_ui.launch(share=True, debug=True, inbrowser=True)" - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "_9HIC_AzfZBZ" - }, - "execution_count": null, - "outputs": [] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "SFA6R-4jL7SS" + }, + "source": [ + "# Synthetic Data Generator Notebook\n", + "## About\n", + "This colab notebook demonstrates the use of Frontier and Open-source LLM models for generating synthetic dataset for a business scenario provided by the user. From a UI interface implemented in gradio, a user can define their business scenario in detail, select the number of records needed along with the its format and adjust the number of max output tokens to be generated by the chosen LLM.\n", + "\n", + "It does not stop here. Once the records have been produced in the LLM output, it can be extracted and stored in a file, format same as set by user before. The file is stored in colab notebook under the contents directory. All of this is extraction is done with the help of the 're' library. My first time using it and I totally enjoyed learning it.\n", + "\n", + "## Outlook\n", + "Sometimes the response is loaded with the user prompt and a lot of tags when using an open-source models, such as Mixtral from Mistral. This is because of the prompt format being used. The 'assistant' 'role' format does not suit them. This is an optimization to look for and can be easily done by using custom prompt template for such models and these templates are hinted on their huggingface repo." 
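The prompt-echo issue mentioned in the Outlook note can be worked around by building the prompt string with the template documented on the model's Hugging Face card instead of passing the generic system/user message list, and by slicing the prompt tokens off the generated output. The snippet below is a minimal sketch only: the `[INST] ... [/INST]` tags assume a Mistral/Mixtral-style instruct template, and `EXAMPLE_MODEL` is an illustrative model id, not something this notebook loads — both should be verified against the actual model card before use.

```python
# Sketch: custom prompt template for instruct models that echo the chat-style prompt.
# Assumptions to verify on the model card: the [INST] ... [/INST] tag format and the
# example model id below. Swap both in for the model you actually load.
from transformers import AutoModelForCausalLM, AutoTokenizer

EXAMPLE_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # illustrative id only

def build_instruct_prompt(system_prompt: str, user_prompt: str) -> str:
    # Mistral-family instruct models expect the instruction wrapped in [INST] tags;
    # the system text is simply prepended inside the same block. The tokenizer adds
    # the BOS token itself, so it is not written into the string here.
    return f"[INST] {system_prompt.strip()}\n\n{user_prompt.strip()} [/INST]"

def generate_with_custom_template(system_prompt: str, user_prompt: str,
                                  max_new_tokens: int = 500) -> str:
    tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_MODEL)
    model = AutoModelForCausalLM.from_pretrained(EXAMPLE_MODEL, device_map="auto")
    prompt = build_instruct_prompt(system_prompt, user_prompt)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Keep only the newly generated tokens so the echoed prompt and template tags
    # never reach the downstream extraction step.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```

If the loaded tokenizer already ships a chat template, `tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")` achieves the same formatting without hand-writing the tags; either way, slicing off the prompt tokens is what keeps the echoed input out of the extracted dataset.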
+ ] }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file + { + "cell_type": "markdown", + "metadata": { + "id": "ip4I4Lff3B2M" + }, + "source": [ + "## Install & Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8zVlW-GMcBaU", + "outputId": "0c473564-fb93-41a9-c819-e6aa2382d75a" + }, + "outputs": [], + "source": [ + "!pip install -q gradio anthropic requests torch bitsandbytes transformers accelerate openai" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YKVNzE5sFH2l" + }, + "outputs": [], + "source": [ + "# imports\n", + "import re\n", + "import os\n", + "import sys\n", + "import gc\n", + "import io\n", + "import json\n", + "import anthropic\n", + "import gradio as gr\n", + "import requests\n", + "import subprocess\n", + "import google.generativeai as ggai\n", + "import torch\n", + "import tempfile\n", + "import shutil\n", + "from io import StringIO\n", + "import pandas as pd\n", + "from google.colab import userdata\n", + "from huggingface_hub import login\n", + "from openai import OpenAI\n", + "from pathlib import Path\n", + "from datetime import datetime\n", + "from IPython.display import Markdown, display, update_display\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LWpD6bZv3mAR" + }, + "source": [ + "## HuggingFace Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aeC2oWY2FTv7" + }, + "outputs": [], + "source": [ + "# Sign in to HuggingFace Hub\n", + "\n", + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8Au2UPVy3vn5" + }, + "source": [ + "## Frontier Models configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "geBBsd14X3UL" + }, + "outputs": [], + "source": [ + "openai_client = OpenAI(api_key=userdata.get('OPENAI_API_KEY'))\n", + "anthropic_client = anthropic.Anthropic(api_key=userdata.get('ANTHROPIC_API_KEY'))\n", + "ggai.configure(api_key=userdata.get('GOOGLE_API_KEY'))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tCnDIOlKgjbO" + }, + "source": [ + "## Defining Prompts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gkwXZsxofAU1" + }, + "outputs": [], + "source": [ + "system_prompt = \"\"\"\n", + "You are a synthetic dataset generator. Your role is to create synthetic dataset that infers structured data schemas from business scenarios given by the user.\n", + "\n", + "Your task is to:\n", + "1. Understand the user's business problem(s) or use case(s).\n", + "2. Identify the key fields needed to support that scenario.\n", + "3. Define appropriate field names, data types, and formats.\n", + "4. Generate synthetic records that match the inferred schema.\n", + "\n", + "Guidelines:\n", + "- Use realistic field names and values. 
Do not invent unrelated fields or values.\n", + "- Choose sensible data types: string, integer, float, date, boolean, enum, etc.\n", + "- Respect logical constraints (e.g., age range, date ranges, email formats).\n", + "- Output the dataset in the format the user requests (json, csv, txt, markdown table).\n", + "- If the scenario is vague or broad, make reasonable assumptions and explain them briefly before generating the dataset.\n", + "- Always generate a dataset that supports the business use case logically.\n", + "\n", + "Before generating the data, display the inferred schema in a readable format.\n", + "\"\"\"\n", + "\n", + "# trial_user_prompt = \"I’m building a churn prediction model for a telecom company. Can you generate a synthetic dataset with 100 rows?\"\n", + "def get_user_prompt(business_problem, no_of_samples, file_format):\n", + " return f\"\"\"\n", + " The business scenario for which I want you to generate a dataset is defined below:\n", + " {business_problem}\n", + "\n", + " Generate a synthetic dataset of {no_of_samples} records in {file_format} format.\n", + " When generating the dataset, wrap it between the '<<<>>>' tag. Make sure the tag is there in the output.\n", + " Do not include any other special characters in between the tags, other than the ones required in producing the correct format of data.\n", + " For examples: When a 'csv' format is given, only the ',' character can be used in between the tags.\n", + " \"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yNpVf9-oQdoO" + }, + "source": [ + "### Quanitzation Config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3ErZ315MQdU3" + }, + "outputs": [], + "source": [ + "# This allows us to load the model into memory and use less memory\n", + "def get_quantization_config():\n", + " return BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "clGtRh0N4951" + }, + "source": [ + "## HF Model inference" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MAhyn1ehb3Dh" + }, + "outputs": [], + "source": [ + "# All in one HuggingFace Model Response function\n", + "def run_hfmodel_and_get_response(prompt, model_name, output_tokens):\n", + " tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + " inputs = tokenizer.apply_chat_template(prompt, return_tensors=\"pt\")\n", + " if torch.cuda.is_available():\n", + " inputs = inputs.to(\"cuda\")\n", + " streamer = TextStreamer(tokenizer)\n", + " if \"microsoft/bitnet-b1.58-2B-4T\" in model_name:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", trust_remote_code=True)\n", + " elif \"tiiuae/Falcon-E-3B-Instruct\" in model_name:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", torch_dtype=torch.float16 )\n", + " else:\n", + " model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", quantization_config=get_quantization_config())\n", + " outputs = model.generate(inputs, max_new_tokens=output_tokens, streamer=streamer)\n", + " response = tokenizer.decode(outputs[0])\n", + " del model, inputs, tokenizer, outputs\n", + " gc.collect()\n", + " torch.cuda.empty_cache()\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"Gh_Ny1aM-L8z" + }, + "source": [ + "## Frontier Models Inference" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "h11WlZNhfHCR" + }, + "outputs": [], + "source": [ + "# ChatGPT, Claude and Gemini response function\n", + "def get_chatgpt_response(prompt, model_name, output_tokens):\n", + " response = openai_client.chat.completions.create(\n", + " model=model_name,\n", + " messages=prompt,\n", + " max_tokens=output_tokens,\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "def get_claude_response(prompt, model_name, output_tokens):\n", + " response = anthropic_client.messages.create(\n", + " model=model_name,\n", + " max_tokens=output_tokens,\n", + " system=system_prompt,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": prompt,\n", + " }\n", + " ],\n", + " )\n", + " return response.content[0].text\n", + "\n", + "def get_gemini_response(prompt, model_name, output_tokens):\n", + " model = ggai.GenerativeModel(\n", + " model_name=model_name,\n", + " system_instruction=system_prompt,\n", + " )\n", + "\n", + " response = model.generate_content(prompt, generation_config={\n", + " \"max_output_tokens\": output_tokens,\n", + " \"temperature\": 0.7,\n", + " })\n", + " return response.text" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nzHbM_WQvRgT" + }, + "source": [ + "## Gradio Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uFWZqw1R-al_" + }, + "source": [ + "### Dropdowns Selection Lists" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rOzEb0o--aD7" + }, + "outputs": [], + "source": [ + "# Dropdown List Values for the user\n", + "MODEL_TYPES=[\"GPT\", \"Claude\", \"Gemini\", \"HuggingFace\"]\n", + "OPENAI_MODEL_NAMES=[\"gpt-4o-mini\", \"gpt-4o\", \"gpt-3.5-turbo\"]\n", + "ANTHROPIC_MODELS=[\"claude-3-7-sonnet-latest\", \"claude-3-5-haiku-latest\", \"claude-3-opus-latest\"]\n", + "GOOGLE_MODELS=[\"gemini-2.0-flash\", \"gemini-1.5-pro\"]\n", + "HUGGINGFACE_MODELS=[\n", + " \"meta-llama/Llama-3.2-3B-Instruct\",\n", + " \"microsoft/bitnet-b1.58-2B-4T\",\n", + " \"ByteDance-Seed/Seed-Coder-8B-Instruct\",\n", + " \"tiiuae/Falcon-E-3B-Instruct\",\n", + " \"Qwen/Qwen2.5-7B-Instruct\"\n", + "]\n", + "MODEL_NAMES = {\n", + " \"GPT\": OPENAI_MODEL_NAMES,\n", + " \"Claude\": ANTHROPIC_MODELS,\n", + " \"Gemini\": GOOGLE_MODELS,\n", + " \"HuggingFace\": HUGGINGFACE_MODELS\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sbXGL8_4-oKc" + }, + "source": [ + "### UI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_0NCY7FgCVHj" + }, + "outputs": [], + "source": [ + "with gr.Blocks() as generator_ui:\n", + " gr.Markdown(\"# 🧠 Business Scenario → Synthetic Dataset Generator\")\n", + "\n", + " with gr.Row():\n", + " with gr.Column(scale=3):\n", + " with gr.Row():\n", + " dataset_size=gr.Number(value=10, label=\"Enter the number of data samples to generate.\", show_label=True)\n", + " format=gr.Dropdown([\"json\", \"csv\", \"txt\", \"markdown\"], label=\"Select the format for the dataset\", show_label=True)\n", + " with gr.Row():\n", + " scenario=gr.Textbox(label=\"Business Scenario\", lines=5, placeholder=\"Describe your business scenario here\")\n", + " with gr.Row():\n", + " error = gr.Markdown(visible=False)\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear Everything\")\n", + " submit = gr.Button(\"Generate Dataset\", variant=\"primary\")\n", + "\n", + " 
with gr.Column(scale=1):\n", + " model_type = gr.Dropdown(MODEL_TYPES, label=\"Model Type\", show_label=True, info=\"Select the model type you want to use\")\n", + " model_name = gr.Dropdown(MODEL_NAMES[model_type.value], label=\"Model Name\", show_label=True, allow_custom_value=True, info=\"Select the model name or enter one manually\")\n", + " output_tokens= gr.Number(value=1000, label=\"Enter the max number of output tokens to generate.\", show_label=True, info=\"This will impact the length of the response containg the dataset\")\n", + "\n", + " with gr.Row():\n", + " # Chatbot Interface\n", + " chatbot = gr.Chatbot(\n", + " type='messages',\n", + " label='Chatbot',\n", + " show_label=True,\n", + " height=300,\n", + " resizable=True,\n", + " elem_id=\"chatbot\",\n", + " avatar_images=(\"🧑\", \"🤖\",)\n", + " )\n", + " with gr.Row(variant=\"compact\"):\n", + " extract_btn = gr.Button(\"Extract and Save Dataset\", variant=\"huggingface\", visible=False)\n", + " file_name = gr.Textbox(label=\"Enter file name here (without file extension)\", placeholder=\"e.g. cancer_synthetic, warehouse_synthetic (no digits)\", visible=False)\n", + " with gr.Row():\n", + " markdown_preview = gr.Markdown(visible = False)\n", + " dataset_preview = gr.Textbox(label=\"Dataset Preview\",visible=False)\n", + " with gr.Row():\n", + " file_saved = gr.Textbox(visible=False)\n", + "\n", + " def run_inference(scenario, model_type, model_name, output_tokens, dataset_size, format):\n", + " \"\"\"Run the model and get the response\"\"\"\n", + " model_type=model_type.lower()\n", + " print(f\"scenario: {scenario}\")\n", + " print(f\"model_type: {model_type}\")\n", + " print(f\"model_name: {model_name}\")\n", + " if not scenario.strip():\n", + " return gr.update(value=\"❌ **Error:** Please define a scenario first!\",visible=True), []\n", + "\n", + " user_prompt = get_user_prompt(scenario, dataset_size, format)\n", + " prompt = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ]\n", + "\n", + " if model_type == \"gpt\":\n", + " response = get_chatgpt_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", + " elif model_type == \"claude\":\n", + " response = get_claude_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", + " elif model_type == \"gemini\":\n", + " response = get_gemini_response(prompt=user_prompt, model_name=model_name, output_tokens=output_tokens)\n", + " else:\n", + " response = run_hfmodel_and_get_response(prompt=prompt, model_name=model_name, output_tokens=output_tokens)\n", + " torch.cuda.empty_cache()\n", + " history = [\n", + " {\"role\": \"user\", \"content\": scenario},\n", + " {\"role\": \"assistant\", \"content\": response}\n", + " ]\n", + " return gr.update(visible=False), history\n", + "\n", + " def extract_dataset_string(response):\n", + " \"\"\"Extract dataset content between defined tags using regex.\"\"\"\n", + " # Remove known artificial tokens (common in HuggingFace or Claude)\n", + " response = re.sub(r\"<\\[.*?\\]>\", \"\", response)\n", + "\n", + " # Remove system or prompt echo if repeated before dataset\n", + " response = re.sub(r\"(?is)^.*?<<<\", \"<<<\", response.strip(), count=1)\n", + "\n", + " # 1. Match strict <<<>>>...<<<>>> tag blocks (use last match)\n", + " matches = re.findall(r\"<<<>>>[\\s\\r\\n]*(.*?)[\\s\\r\\n]*<<<>>>\", response, re.DOTALL)\n", + " if matches:\n", + " return matches[-1].strip()\n", + "\n", + " # 2. Match loose <<< ... 
>>> format\n", + " matches = re.findall(r\"<<<[\\s\\r\\n]*(.*?)[\\s\\r\\n]*>>>\", response, re.DOTALL)\n", + " if matches:\n", + " return matches[-1].strip()\n", + "\n", + " # 3. Match final fallback: take everything after last <<< as raw data\n", + " last_open = response.rfind(\"<<<\")\n", + " if last_open != -1:\n", + " raw = response[last_open + 3 :].strip()\n", + " # Optionally cut off noisy trailing notes, explanations, etc.\n", + " raw = re.split(r\"\\n\\s*\\n|Explanation:|Note:|---\", raw)[0]\n", + " return raw.strip()\n", + "\n", + " return \"Could not extract dataset! Try again with a different model.\"\n", + "\n", + " def extract_dataset_from_response(chatbot_history, file_name, file_type):\n", + " \"\"\"Extract dataset and update in gradio UI components\"\"\"\n", + " response = chatbot_history[-1][\"content\"]\n", + " if not response:\n", + " return gr.update(visible=True, value=\"Could not find LLM Response! Try again.\"), gr.update(visible=False)\n", + "\n", + " # match = re.search(r'<<<\\s*(.*?)\\s*>>>', response, re.DOTALL)\n", + " # print(match)\n", + " # if match and match.group(1).strip() == \"\":\n", + " # match = re.search(r'<<<>>>\\s*(.*?)\\s*<<<>>>', response, re.DOTALL)\n", + " # print(match)\n", + " # if match is None:\n", + " # return gr.update(visible=True, value=\"Could not extract dataset! Try again with a different model.\"), gr.update(visible=False)\n", + " # dataset = match.group(1).strip()\n", + " dataset = extract_dataset_string(response)\n", + " if dataset == \"Could not extract dataset! Try again with a different model.\":\n", + " return gr.update(visible=True, value=dataset), gr.update(visible=False)\n", + " text = save_dataset(dataset, file_type, file_name)\n", + " return gr.update(visible=True, value=text), gr.update(visible=True, value=dataset)\n", + "\n", + " def save_dataset(dataset, file_format, file_name):\n", + " \"\"\"Save dataset to a file based on the selected format.\"\"\"\n", + " file_name=file_name+\".\"+file_format\n", + " print(dataset)\n", + " print(file_name)\n", + " if file_format == \"json\":\n", + " try:\n", + " data = json.loads(dataset)\n", + " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", + " json.dump(data, f, indent=4)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! Try again in another format.\"\n", + " elif file_format == \"csv\":\n", + " try:\n", + " df = pd.read_csv(StringIO(dataset))\n", + " df.to_csv(file_name, index=False)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! Try again in another format.\"\n", + " elif file_format == \"txt\":\n", + " try:\n", + " with open(file_name, \"w\", encoding=\"utf-8\") as f:\n", + " f.write(dataset)\n", + " return \"Dataset saved successfully!\"\n", + " except:\n", + " return \"Could not save dataset! 
Try again in another format.\"\n", + "\n", + " def clear_chat():\n", + " \"\"\"Clear the chat history.\"\"\"\n", + " return \"\", [], gr.update(visible=False), gr.update(visible=False)\n", + "\n", + " def show_extract_btn(chatbot_history, format):\n", + " \"\"\"Show the extract button if the response has been displayed in the chatbot and format is not set to markdown\"\"\"\n", + " if chatbot_history == []:\n", + " return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)\n", + " if format == \"markdown\":\n", + " return gr.update(visible=True, value=chatbot_history[1][\"content\"]), gr.update(visible=False), gr.update(visible=False)\n", + " return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)\n", + "\n", + " extract_btn.click(\n", + " fn=extract_dataset_from_response,\n", + " inputs=[chatbot, file_name, format],\n", + " outputs=[file_saved, dataset_preview]\n", + " )\n", + "\n", + " chatbot.change(\n", + " fn=show_extract_btn,\n", + " inputs=[chatbot, format],\n", + " outputs=[markdown_preview, extract_btn, file_name]\n", + " )\n", + "\n", + " model_type.change(\n", + " fn=lambda x: gr.update(choices=MODEL_NAMES[x], value=MODEL_NAMES[x][0]),\n", + " inputs=[model_type],\n", + " outputs=[model_name]\n", + " )\n", + "\n", + " submit.click(\n", + " fn=run_inference,\n", + " inputs=[scenario, model_type, model_name, output_tokens, dataset_size, format],\n", + " outputs=[error, chatbot],\n", + " show_progress=True\n", + " )\n", + "\n", + " clear.click(\n", + " clear_chat,\n", + " outputs=[scenario, chatbot, dataset_preview, file_saved]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "kzDUJahK8uRN", + "outputId": "c5674be2-b262-4439-ae91-4f3e1f49e041" + }, + "outputs": [], + "source": [ + "# Example Scenarios\n", + "\n", + "# Generate a dataset for predicting customer churn in a subscription-based telecom company. Include features like monthly charges, contract type, tenure (in months), number of support calls, internet usage (in GB), payment method, and whether the customer has churned.\n", + "# Generate a dataset for training a model to approve/reject loan applications. Include features like loan amount, applicant income, co-applicant income, employment type, credit history (binary), loan term, number of dependents, education level, and loan approval status.\n", + "# Create a dataset of credit card transactions for detecting fraud. Include transaction ID, amount, timestamp, merchant category, customer location, card presence (yes/no), transaction device type, and fraud label (yes/no).\n", + "# Generate a dataset of investment customers with fields like portfolio value, age, income bracket, risk appetite (low/medium/high), number of transactions per month, preferred investment types, and risk score.\n", + "# Create a dataset of hospitalized patients to predict readmission within 30 days. Include patient ID, age, gender, number of prior admissions, diagnosis codes, length of stay, discharge type, medications prescribed, and readmission label.\n", + "# Generate a dataset for predicting medical appointment no-shows. 
Include appointment ID, scheduled date, appointment date, lead time (days between scheduling and appointment), SMS reminders sent, patient age, gender, health condition severity, and no-show status.\n", + "\n", + "generator_ui.launch(share=True, debug=True, inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_9HIC_AzfZBZ" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From b2b26ddd4b4063cdbcd43fed4663e19cb0381579 Mon Sep 17 00:00:00 2001 From: Fikri Raihan Date: Sun, 25 May 2025 17:38:22 +0700 Subject: [PATCH 03/23] Added my contributions to community contributions week1 day1, github information --- .../day-1-github-information.ipynb | 841 ++++++++++++++++++ 1 file changed, 841 insertions(+) create mode 100644 week1/community-contributions/day-1-github-information.ipynb diff --git a/week1/community-contributions/day-1-github-information.ipynb b/week1/community-contributions/day-1-github-information.ipynb new file mode 100644 index 0000000..b5adb6d --- /dev/null +++ b/week1/community-contributions/day-1-github-information.ipynb @@ -0,0 +1,841 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "4d011f3d-c10c-4a75-bd36-576e383a8d1d", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c51302e0-c848-4ec4-a0ab-03deeb9e7987", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Api key found and looks good so far!\n" + ] + } + ], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if not api_key:\n", + " print('No Api Key was found')\n", + "elif not api_key.startswith('sk-proj-'):\n", + " print(\"An api key was found, but it doesnt start with sk-proj\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An api key was found, but it might have space in the first or end\")\n", + "else:\n", + " print(\"Api key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d1df04f3-bd4d-4b14-87cc-1e91eaf7c0ab", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "340b018a-6e97-491c-aa26-66c683ece8a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! Welcome! 
How can I assist you today?\n" + ] + } + ], + "source": [ + "message = \"Hello GPT, this is my first message\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\": \"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "4a06c291-2fe6-4669-a8b6-3b67769eb3fa", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "dd36b141-a252-44a8-8fa4-d4c2c33d3db9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fikriraihan · GitHub\n", + "Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "Appearance settings\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "GitHub Models\n", + "New\n", + "Manage and compare prompts\n", + "GitHub Advanced Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "Why GitHub\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "Nonprofits\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + "Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "Events & Webinars\n", + "Ebooks & Whitepapers\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "GitHub Advanced Security\n", + "Enterprise-grade security features\n", + "Copilot for business\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input very seriously.\n", + 
"Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Appearance settings\n", + "Resetting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Fikriraihan\n", + "Follow\n", + "Overview\n", + "Repositories\n", + "34\n", + "Projects\n", + "0\n", + "Packages\n", + "0\n", + "Stars\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "Stars\n", + "Fikriraihan\n", + "Follow\n", + "Fikriraihan\n", + "Follow\n", + "Block or Report\n", + "Block or report Fikriraihan\n", + "Report abuse\n", + "Contact GitHub support about this user’s behavior.\n", + " Learn more about\n", + "reporting abuse\n", + ".\n", + "Report abuse\n", + "Overview\n", + "Repositories\n", + "34\n", + "Projects\n", + "0\n", + "Packages\n", + "0\n", + "Stars\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "Stars\n", + "Pinned\n", + "Loading\n", + "2024-coding-challenge\n", + "2024-coding-challenge\n", + "Public\n", + "Repository for Coding Challenge 2024\n", + "JavaScript\n", + "ChatGPT\n", + "ChatGPT\n", + "Public\n", + "TypeScript\n", + "fikri-3d-portofolio\n", + "fikri-3d-portofolio\n", + "Public\n", + "JavaScript\n", + "nextjs-dashboard\n", + "nextjs-dashboard\n", + "Public\n", + "Nextjs-dashboard course\n", + "TypeScript\n", + "nextjs-postgre\n", + "nextjs-postgre\n", + "Public\n", + "TypeScript\n", + "imaginify\n", + "imaginify\n", + "Public\n", + "TypeScript\n", + "Something went wrong, please refresh the page to try again.\n", + "If the problem persists, check the\n", + "GitHub status page\n", + "or\n", + "contact support\n", + ".\n", + "Uh oh!\n", + "There was an error while loading.\n", + "Please reload this page\n", + ".\n", + "Footer\n", + "© 2025 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + "Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n" + ] + } + ], + "source": [ + "github = Website(\"https://github.com/Fikriraihan\")\n", + "print(github.title)\n", + "print(github.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "ea402ba2-6c7f-4f96-95c0-d68a0e96e644", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are a skilled GitHub profile analyzer. 
\" \\\n", + "\"Your job is to take the provided GitHub profile or repository URL and generate a clear, structured summary covering these points: \" \\\n", + "\"1️⃣ **Profile Summary** \" \\\n", + "\"- Username \" \\\n", + "\"- Bio (if available) \" \\\n", + "\"- Total public repositories \" \\\n", + "\"- Total followers \" \\\n", + "\"- Total stars received (sum across repos) \" \\\n", + "\"- Top programming languages (by repo count) \" \\\n", + "\"2️⃣ **Repository Highlights** (top 3 by stars or activity) \" \\\n", + "\"For each: \" \\\n", + "\"- Repository name \" \\\n", + "\"- Description \" \\\n", + "\"- Primary language \" \\\n", + "\"- Star count \" \\\n", + "\"- Last updated date \" \\\n", + "\"- Notable technologies or frameworks used \" \\\n", + "\"3️⃣ **Overall Assessment** \" \\\n", + "\"- What does this user specialize in? \" \\\n", + "\"- Are they more focused on personal projects or collaborations? \" \\\n", + "\"- Any standout strengths or skills you notice? \" \\\n", + "\"4️⃣ **Recommendations** \" \\\n", + "\"- Suggest one area or technology they could explore next to grow. \" \\\n", + "\"- Suggest one improvement to make their GitHub profile more appealing. \" \\\n", + "\"Be concise, insightful, and encourage the user’s growth. \" \\\n", + "\"If some data is missing, state it clearly instead of guessing.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "a964e8f2-40f4-457b-9c81-7e6e2768f450", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a github named {website.title}\"\n", + " user_prompt += \"\\nThe contents of this github is as follows; \\\n", + "please provide a summary of this website in markdown.\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "026d8ae4-1aea-45b9-b694-db0809527780", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'You are a skilled GitHub profile analyzer. Your job is to take the provided GitHub profile or repository URL and generate a clear, structured summary covering these points: 1️⃣ **Profile Summary** - Username - Bio (if available) - Total public repositories - Total followers - Total stars received (sum across repos) - Top programming languages (by repo count) 2️⃣ **Repository Highlights** (top 3 by stars or activity) For each: - Repository name - Description - Primary language - Star count - Last updated date - Notable technologies or frameworks used 3️⃣ **Overall Assessment** - What does this user specialize in? - Are they more focused on personal projects or collaborations? - Any standout strengths or skills you notice? 4️⃣ **Recommendations** - Suggest one area or technology they could explore next to grow. - Suggest one improvement to make their GitHub profile more appealing. Be concise, insightful, and encourage the user’s growth. 
If some data is missing, state it clearly instead of guessing.'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "system_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "2e040916-8d7e-421b-b1a7-56e710940eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are looking at a github named Fikriraihan · GitHub\n", + "The contents of this github is as follows; please provide a summary of this website in markdown.Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "Appearance settings\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "GitHub Models\n", + "New\n", + "Manage and compare prompts\n", + "GitHub Advanced Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "Why GitHub\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "Nonprofits\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + "Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "Events & Webinars\n", + "Ebooks & Whitepapers\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "GitHub Advanced Security\n", + "Enterprise-grade security features\n", + "Copilot for business\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input very seriously.\n", + "Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Appearance settings\n", + "Resetting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Fikriraihan\n", + "Follow\n", + "Overview\n", + "Repositories\n", + "34\n", + "Projects\n", + "0\n", + "Packages\n", + "0\n", + "Stars\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "Stars\n", + "Fikriraihan\n", + 
"Follow\n", + "Fikriraihan\n", + "Follow\n", + "Block or Report\n", + "Block or report Fikriraihan\n", + "Report abuse\n", + "Contact GitHub support about this user’s behavior.\n", + " Learn more about\n", + "reporting abuse\n", + ".\n", + "Report abuse\n", + "Overview\n", + "Repositories\n", + "34\n", + "Projects\n", + "0\n", + "Packages\n", + "0\n", + "Stars\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "Stars\n", + "Pinned\n", + "Loading\n", + "2024-coding-challenge\n", + "2024-coding-challenge\n", + "Public\n", + "Repository for Coding Challenge 2024\n", + "JavaScript\n", + "ChatGPT\n", + "ChatGPT\n", + "Public\n", + "TypeScript\n", + "fikri-3d-portofolio\n", + "fikri-3d-portofolio\n", + "Public\n", + "JavaScript\n", + "nextjs-dashboard\n", + "nextjs-dashboard\n", + "Public\n", + "Nextjs-dashboard course\n", + "TypeScript\n", + "nextjs-postgre\n", + "nextjs-postgre\n", + "Public\n", + "TypeScript\n", + "imaginify\n", + "imaginify\n", + "Public\n", + "TypeScript\n", + "Something went wrong, please refresh the page to try again.\n", + "If the problem persists, check the\n", + "GitHub status page\n", + "or\n", + "contact support\n", + ".\n", + "Uh oh!\n", + "There was an error while loading.\n", + "Please reload this page\n", + ".\n", + "Footer\n", + "© 2025 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + "Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n" + ] + } + ], + "source": [ + "print(user_prompt_for(github))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "da2a2c62-0ff4-4e4b-a1a1-774b47f848a0", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"tell me a fruit that has red color\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "204b9b40-cfd9-46f4-a954-efee75fc3d79", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Oh, I don’t know, how about the classic red apple? Or maybe you were hoping for something more exotic, like a blood orange? There’s also the ever-popular strawberry. The options are endless! What’s next, a fruit quiz?\n" + ] + } + ], + "source": [ + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "0a4a376a-8c20-4fd3-91ad-25511df76292", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "11bc74b0-7ca7-40da-81cc-84b2dd04780b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'system',\n", + " 'content': 'You are a skilled GitHub profile analyzer. 
Your job is to take the provided GitHub profile or repository URL and generate a clear, structured summary covering these points: 1️⃣ **Profile Summary** - Username - Bio (if available) - Total public repositories - Total followers - Total stars received (sum across repos) - Top programming languages (by repo count) 2️⃣ **Repository Highlights** (top 3 by stars or activity) For each: - Repository name - Description - Primary language - Star count - Last updated date - Notable technologies or frameworks used 3️⃣ **Overall Assessment** - What does this user specialize in? - Are they more focused on personal projects or collaborations? - Any standout strengths or skills you notice? 4️⃣ **Recommendations** - Suggest one area or technology they could explore next to grow. - Suggest one improvement to make their GitHub profile more appealing. Be concise, insightful, and encourage the user’s growth. If some data is missing, state it clearly instead of guessing.'},\n", + " {'role': 'user',\n", + " 'content': 'You are looking at a github named Fikriraihan · GitHub\\nThe contents of this github is as follows; please provide a summary of this website in markdown.Skip to content\\nNavigation Menu\\nToggle navigation\\nSign in\\nAppearance settings\\nProduct\\nGitHub Copilot\\nWrite better code with AI\\nGitHub Models\\nNew\\nManage and compare prompts\\nGitHub Advanced Security\\nFind and fix vulnerabilities\\nActions\\nAutomate any workflow\\nCodespaces\\nInstant dev environments\\nIssues\\nPlan and track work\\nCode Review\\nManage code changes\\nDiscussions\\nCollaborate outside of code\\nCode Search\\nFind more, search less\\nExplore\\nWhy GitHub\\nAll features\\nDocumentation\\nGitHub Skills\\nBlog\\nSolutions\\nBy company size\\nEnterprises\\nSmall and medium teams\\nStartups\\nNonprofits\\nBy use case\\nDevSecOps\\nDevOps\\nCI/CD\\nView all use cases\\nBy industry\\nHealthcare\\nFinancial services\\nManufacturing\\nGovernment\\nView all industries\\nView all solutions\\nResources\\nTopics\\nAI\\nDevOps\\nSecurity\\nSoftware Development\\nView all\\nExplore\\nLearning Pathways\\nEvents & Webinars\\nEbooks & Whitepapers\\nCustomer Stories\\nPartners\\nExecutive Insights\\nOpen Source\\nGitHub Sponsors\\nFund open source developers\\nThe ReadME Project\\nGitHub community articles\\nRepositories\\nTopics\\nTrending\\nCollections\\nEnterprise\\nEnterprise platform\\nAI-powered developer platform\\nAvailable add-ons\\nGitHub Advanced Security\\nEnterprise-grade security features\\nCopilot for business\\nEnterprise-grade AI features\\nPremium Support\\nEnterprise-grade 24/7 support\\nPricing\\nSearch or jump to...\\nSearch code, repositories, users, issues, pull requests...\\nSearch\\nClear\\nSearch syntax tips\\nProvide feedback\\nWe read every piece of feedback, and take your input very seriously.\\nInclude my email address so I can be contacted\\nCancel\\nSubmit feedback\\nSaved searches\\nUse saved searches to filter your results more quickly\\nCancel\\nCreate saved search\\nSign in\\nSign up\\nAppearance settings\\nResetting focus\\nYou signed in with another tab or window.\\nReload\\nto refresh your session.\\nYou signed out in another tab or window.\\nReload\\nto refresh your session.\\nYou switched accounts on another tab or window.\\nReload\\nto refresh your session.\\nDismiss alert\\nFikriraihan\\nFollow\\nOverview\\nRepositories\\n34\\nProjects\\n0\\nPackages\\n0\\nStars\\n0\\nMore\\nOverview\\nRepositories\\nProjects\\nPackages\\nStars\\nFikriraihan\\nFollow\\nFikriraihan\\nFollow\\nBlock 
or Report\\nBlock or report Fikriraihan\\nReport abuse\\nContact GitHub support about this user’s behavior.\\n Learn more about\\nreporting abuse\\n.\\nReport abuse\\nOverview\\nRepositories\\n34\\nProjects\\n0\\nPackages\\n0\\nStars\\n0\\nMore\\nOverview\\nRepositories\\nProjects\\nPackages\\nStars\\nPinned\\nLoading\\n2024-coding-challenge\\n2024-coding-challenge\\nPublic\\nRepository for Coding Challenge 2024\\nJavaScript\\nChatGPT\\nChatGPT\\nPublic\\nTypeScript\\nfikri-3d-portofolio\\nfikri-3d-portofolio\\nPublic\\nJavaScript\\nnextjs-dashboard\\nnextjs-dashboard\\nPublic\\nNextjs-dashboard course\\nTypeScript\\nnextjs-postgre\\nnextjs-postgre\\nPublic\\nTypeScript\\nimaginify\\nimaginify\\nPublic\\nTypeScript\\nSomething went wrong, please refresh the page to try again.\\nIf the problem persists, check the\\nGitHub status page\\nor\\ncontact support\\n.\\nUh oh!\\nThere was an error while loading.\\nPlease reload this page\\n.\\nFooter\\n© 2025 GitHub,\\xa0Inc.\\nFooter navigation\\nTerms\\nPrivacy\\nSecurity\\nStatus\\nDocs\\nContact\\nManage cookies\\nDo not share my personal information\\nYou can’t perform that action at this time.'}]" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "messages_for(github)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "e64f497f-3742-4d70-9e15-29d1974b3361", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "95d0938d-0b26-4253-94a6-ac9240e7a8c9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'# GitHub Profile Summary for Fikriraihan\\n\\n### 1️⃣ Profile Summary\\n- **Username:** Fikriraihan\\n- **Bio:** (No bio available)\\n- **Total public repositories:** 34\\n- **Total followers:** 0 (not indicated)\\n- **Total stars received:** 0\\n- **Top programming languages (by repo count):**\\n - JavaScript\\n - TypeScript\\n\\n### 2️⃣ Repository Highlights\\n**Top 3 repositories by activity:**\\n\\n1. **Repository Name:** 2024-coding-challenge\\n - **Description:** Repository for Coding Challenge 2024\\n - **Primary Language:** JavaScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** JavaScript\\n\\n2. **Repository Name:** ChatGPT\\n - **Description:** (No description provided)\\n - **Primary Language:** TypeScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** TypeScript\\n\\n3. 
**Repository Name:** fikri-3d-portofolio\\n - **Description:** (No description provided)\\n - **Primary Language:** JavaScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** JavaScript\\n\\n### 3️⃣ Overall Assessment\\n- **What does this user specialize in?** \\n - Based on the repository languages, Fikriraihan appears to specialize in JavaScript and TypeScript.\\n\\n- **Are they more focused on personal projects or collaborations?** \\n - The profile indicates a focus on personal projects given the lack of followers and collaborations apparent from the repositories.\\n\\n- **Any standout strengths or skills you notice?** \\n - The presence of JavaScript and TypeScript projects suggests proficiency in web development, specifically in relation to modern frameworks.\\n\\n### 4️⃣ Recommendations\\n- **One area or technology to explore next to grow:**\\n - Fikriraihan could benefit from exploring backend technologies, such as Node.js or Express, to complement their front-end skills with JavaScript/TypeScript.\\n\\n- **One improvement to make their GitHub profile more appealing:**\\n - Adding a bio and descriptions for each repository would help provide context and showcase their intent and the purpose behind each project, thereby attracting more engagement and potential collaborators.'" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "summarize(\"https://github.com/Fikriraihan\")" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "cd863db3-731a-46d8-ac14-f74f8ae39bd4", + "metadata": {}, + "outputs": [], + "source": [ + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "70c5c3aa-2c06-460b-9c4f-6465d2c8611c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# GitHub Profile Summary for Fikriraihan\n", + "\n", + "### 1️⃣ Profile Summary\n", + "- **Username:** Fikriraihan\n", + "- **Bio:** Not available\n", + "- **Total public repositories:** 34\n", + "- **Total followers:** Not available\n", + "- **Total stars received:** 0\n", + "- **Top programming languages (by repo count):**\n", + " - JavaScript\n", + " - TypeScript\n", + "\n", + "### 2️⃣ Repository Highlights\n", + "Here are the top repositories based on their details:\n", + "\n", + "1. **Repository Name:** 2024-coding-challenge\n", + " - **Description:** Repository for Coding Challenge 2024\n", + " - **Primary Language:** JavaScript\n", + " - **Star Count:** 0\n", + " - **Last Updated Date:** Not available\n", + " - **Notable Technologies/Frameworks Used:** None specified\n", + "\n", + "2. **Repository Name:** ChatGPT\n", + " - **Description:** Not available\n", + " - **Primary Language:** TypeScript\n", + " - **Star Count:** 0\n", + " - **Last Updated Date:** Not available\n", + " - **Notable Technologies/Frameworks Used:** None specified\n", + "\n", + "3. 
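`summarize()` waits for the whole completion before anything appears. For long profiles, a streamed variant can update the Markdown display as tokens arrive; the sketch below assumes the same `openai` client, `Website` class, and `messages_for()` defined above, and uses the standard `stream=True` chunk interface of the Chat Completions API.

```python
from IPython.display import Markdown, display

def stream_summary(url):
    """Stream the profile summary into the notebook as it is generated (sketch)."""
    website = Website(url)
    stream = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages_for(website),
        stream=True,
    )
    reply = ""
    handle = display(Markdown(""), display_id=True)
    for chunk in stream:
        reply += chunk.choices[0].delta.content or ""  # delta.content can be None on some chunks
        handle.update(Markdown(reply))
    return reply
```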
**Repository Name:** fikri-3d-portofolio\n", + " - **Description:** Not available\n", + " - **Primary Language:** JavaScript\n", + " - **Star Count:** 0\n", + " - **Last Updated Date:** Not available\n", + " - **Notable Technologies/Frameworks Used:** None specified\n", + "\n", + "### 3️⃣ Overall Assessment\n", + "- **What does this user specialize in?** Fikriraihan specializes in JavaScript and TypeScript, indicating a focus on web development or applications that utilize these languages.\n", + "- **Are they more focused on personal projects or collaborations?** The presence of multiple repositories suggests a mix of personal projects. There is no indication of collaboration, as there are no mentions of contributions to external repositories.\n", + "- **Any standout strengths or skills you notice?** The variety of repositories shows an interest in different coding challenges and portfolio projects. However, the lack of stars suggests that the projects may not yet attract a significant audience.\n", + "\n", + "### 4️⃣ Recommendations\n", + "- **Suggest one area or technology they could explore next to grow:** Given the user’s focus on JavaScript and TypeScript, exploring frameworks like React, Vue.js, or even server-side technologies such as Node.js could be beneficial.\n", + "- **Suggest one improvement to make their GitHub profile more appealing:** Adding a bio with a brief introduction and interests, along with project descriptions, would provide more context about the user and enhance engagement with their repositories. Additionally, increasing the visibility of the repositories through more optimization and possibly sharing or collaborating on projects could attract more stars and followers." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_summary(\"https://github.com/Fikriraihan\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3dfe6e3-dfd2-4acd-a2e4-681873c650c8", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From feba5d084777035c42908e18e6214fdb994b142a Mon Sep 17 00:00:00 2001 From: Fikri Raihan Date: Sun, 25 May 2025 17:46:40 +0700 Subject: [PATCH 04/23] Added my contributions to community-contributions wk1 day1, github information --- .../day-1-github-information.ipynb | 626 +----------------- 1 file changed, 23 insertions(+), 603 deletions(-) diff --git a/week1/community-contributions/day-1-github-information.ipynb b/week1/community-contributions/day-1-github-information.ipynb index b5adb6d..5b8cf40 100644 --- a/week1/community-contributions/day-1-github-information.ipynb +++ b/week1/community-contributions/day-1-github-information.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "4d011f3d-c10c-4a75-bd36-576e383a8d1d", "metadata": {}, "outputs": [], @@ -22,18 +22,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "c51302e0-c848-4ec4-a0ab-03deeb9e7987", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - 
"Api key found and looks good so far!\n" - ] - } - ], + "outputs": [], "source": [ "load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", @@ -50,7 +42,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "d1df04f3-bd4d-4b14-87cc-1e91eaf7c0ab", "metadata": {}, "outputs": [], @@ -60,18 +52,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "340b018a-6e97-491c-aa26-66c683ece8a0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello! Welcome! How can I assist you today?\n" - ] - } - ], + "outputs": [], "source": [ "message = \"Hello GPT, this is my first message\"\n", "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\": \"user\", \"content\":message}])\n", @@ -80,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "4a06c291-2fe6-4669-a8b6-3b67769eb3fa", "metadata": {}, "outputs": [], @@ -106,223 +90,10 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "dd36b141-a252-44a8-8fa4-d4c2c33d3db9", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fikriraihan · GitHub\n", - "Skip to content\n", - "Navigation Menu\n", - "Toggle navigation\n", - "Sign in\n", - "Appearance settings\n", - "Product\n", - "GitHub Copilot\n", - "Write better code with AI\n", - "GitHub Models\n", - "New\n", - "Manage and compare prompts\n", - "GitHub Advanced Security\n", - "Find and fix vulnerabilities\n", - "Actions\n", - "Automate any workflow\n", - "Codespaces\n", - "Instant dev environments\n", - "Issues\n", - "Plan and track work\n", - "Code Review\n", - "Manage code changes\n", - "Discussions\n", - "Collaborate outside of code\n", - "Code Search\n", - "Find more, search less\n", - "Explore\n", - "Why GitHub\n", - "All features\n", - "Documentation\n", - "GitHub Skills\n", - "Blog\n", - "Solutions\n", - "By company size\n", - "Enterprises\n", - "Small and medium teams\n", - "Startups\n", - "Nonprofits\n", - "By use case\n", - "DevSecOps\n", - "DevOps\n", - "CI/CD\n", - "View all use cases\n", - "By industry\n", - "Healthcare\n", - "Financial services\n", - "Manufacturing\n", - "Government\n", - "View all industries\n", - "View all solutions\n", - "Resources\n", - "Topics\n", - "AI\n", - "DevOps\n", - "Security\n", - "Software Development\n", - "View all\n", - "Explore\n", - "Learning Pathways\n", - "Events & Webinars\n", - "Ebooks & Whitepapers\n", - "Customer Stories\n", - "Partners\n", - "Executive Insights\n", - "Open Source\n", - "GitHub Sponsors\n", - "Fund open source developers\n", - "The ReadME Project\n", - "GitHub community articles\n", - "Repositories\n", - "Topics\n", - "Trending\n", - "Collections\n", - "Enterprise\n", - "Enterprise platform\n", - "AI-powered developer platform\n", - "Available add-ons\n", - "GitHub Advanced Security\n", - "Enterprise-grade security features\n", - "Copilot for business\n", - "Enterprise-grade AI features\n", - "Premium Support\n", - "Enterprise-grade 24/7 support\n", - "Pricing\n", - "Search or jump to...\n", - "Search code, repositories, users, issues, pull requests...\n", - "Search\n", - "Clear\n", - "Search syntax tips\n", - "Provide feedback\n", - "We read every piece of feedback, and take your input very seriously.\n", - "Include my email address so I can be contacted\n", - "Cancel\n", - "Submit feedback\n", - "Saved searches\n", - "Use saved searches to filter your 
results more quickly\n", - "Cancel\n", - "Create saved search\n", - "Sign in\n", - "Sign up\n", - "Appearance settings\n", - "Resetting focus\n", - "You signed in with another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You signed out in another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You switched accounts on another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "Dismiss alert\n", - "Fikriraihan\n", - "Follow\n", - "Overview\n", - "Repositories\n", - "34\n", - "Projects\n", - "0\n", - "Packages\n", - "0\n", - "Stars\n", - "0\n", - "More\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "Stars\n", - "Fikriraihan\n", - "Follow\n", - "Fikriraihan\n", - "Follow\n", - "Block or Report\n", - "Block or report Fikriraihan\n", - "Report abuse\n", - "Contact GitHub support about this user’s behavior.\n", - " Learn more about\n", - "reporting abuse\n", - ".\n", - "Report abuse\n", - "Overview\n", - "Repositories\n", - "34\n", - "Projects\n", - "0\n", - "Packages\n", - "0\n", - "Stars\n", - "0\n", - "More\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "Stars\n", - "Pinned\n", - "Loading\n", - "2024-coding-challenge\n", - "2024-coding-challenge\n", - "Public\n", - "Repository for Coding Challenge 2024\n", - "JavaScript\n", - "ChatGPT\n", - "ChatGPT\n", - "Public\n", - "TypeScript\n", - "fikri-3d-portofolio\n", - "fikri-3d-portofolio\n", - "Public\n", - "JavaScript\n", - "nextjs-dashboard\n", - "nextjs-dashboard\n", - "Public\n", - "Nextjs-dashboard course\n", - "TypeScript\n", - "nextjs-postgre\n", - "nextjs-postgre\n", - "Public\n", - "TypeScript\n", - "imaginify\n", - "imaginify\n", - "Public\n", - "TypeScript\n", - "Something went wrong, please refresh the page to try again.\n", - "If the problem persists, check the\n", - "GitHub status page\n", - "or\n", - "contact support\n", - ".\n", - "Uh oh!\n", - "There was an error while loading.\n", - "Please reload this page\n", - ".\n", - "Footer\n", - "© 2025 GitHub, Inc.\n", - "Footer navigation\n", - "Terms\n", - "Privacy\n", - "Security\n", - "Status\n", - "Docs\n", - "Contact\n", - "Manage cookies\n", - "Do not share my personal information\n", - "You can’t perform that action at this time.\n" - ] - } - ], + "outputs": [], "source": [ "github = Website(\"https://github.com/Fikriraihan\")\n", "print(github.title)\n", @@ -331,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "ea402ba2-6c7f-4f96-95c0-d68a0e96e644", "metadata": {}, "outputs": [], @@ -366,7 +137,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "a964e8f2-40f4-457b-9c81-7e6e2768f450", "metadata": {}, "outputs": [], @@ -381,321 +152,37 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "026d8ae4-1aea-45b9-b694-db0809527780", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'You are a skilled GitHub profile analyzer. 
Your job is to take the provided GitHub profile or repository URL and generate a clear, structured summary covering these points: 1️⃣ **Profile Summary** - Username - Bio (if available) - Total public repositories - Total followers - Total stars received (sum across repos) - Top programming languages (by repo count) 2️⃣ **Repository Highlights** (top 3 by stars or activity) For each: - Repository name - Description - Primary language - Star count - Last updated date - Notable technologies or frameworks used 3️⃣ **Overall Assessment** - What does this user specialize in? - Are they more focused on personal projects or collaborations? - Any standout strengths or skills you notice? 4️⃣ **Recommendations** - Suggest one area or technology they could explore next to grow. - Suggest one improvement to make their GitHub profile more appealing. Be concise, insightful, and encourage the user’s growth. If some data is missing, state it clearly instead of guessing.'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "system_prompt" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "2e040916-8d7e-421b-b1a7-56e710940eaa", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are looking at a github named Fikriraihan · GitHub\n", - "The contents of this github is as follows; please provide a summary of this website in markdown.Skip to content\n", - "Navigation Menu\n", - "Toggle navigation\n", - "Sign in\n", - "Appearance settings\n", - "Product\n", - "GitHub Copilot\n", - "Write better code with AI\n", - "GitHub Models\n", - "New\n", - "Manage and compare prompts\n", - "GitHub Advanced Security\n", - "Find and fix vulnerabilities\n", - "Actions\n", - "Automate any workflow\n", - "Codespaces\n", - "Instant dev environments\n", - "Issues\n", - "Plan and track work\n", - "Code Review\n", - "Manage code changes\n", - "Discussions\n", - "Collaborate outside of code\n", - "Code Search\n", - "Find more, search less\n", - "Explore\n", - "Why GitHub\n", - "All features\n", - "Documentation\n", - "GitHub Skills\n", - "Blog\n", - "Solutions\n", - "By company size\n", - "Enterprises\n", - "Small and medium teams\n", - "Startups\n", - "Nonprofits\n", - "By use case\n", - "DevSecOps\n", - "DevOps\n", - "CI/CD\n", - "View all use cases\n", - "By industry\n", - "Healthcare\n", - "Financial services\n", - "Manufacturing\n", - "Government\n", - "View all industries\n", - "View all solutions\n", - "Resources\n", - "Topics\n", - "AI\n", - "DevOps\n", - "Security\n", - "Software Development\n", - "View all\n", - "Explore\n", - "Learning Pathways\n", - "Events & Webinars\n", - "Ebooks & Whitepapers\n", - "Customer Stories\n", - "Partners\n", - "Executive Insights\n", - "Open Source\n", - "GitHub Sponsors\n", - "Fund open source developers\n", - "The ReadME Project\n", - "GitHub community articles\n", - "Repositories\n", - "Topics\n", - "Trending\n", - "Collections\n", - "Enterprise\n", - "Enterprise platform\n", - "AI-powered developer platform\n", - "Available add-ons\n", - "GitHub Advanced Security\n", - "Enterprise-grade security features\n", - "Copilot for business\n", - "Enterprise-grade AI features\n", - "Premium Support\n", - "Enterprise-grade 24/7 support\n", - "Pricing\n", - "Search or jump to...\n", - "Search code, repositories, users, issues, pull requests...\n", - "Search\n", - "Clear\n", - "Search syntax tips\n", - "Provide feedback\n", - "We read every 
piece of feedback, and take your input very seriously.\n", - "Include my email address so I can be contacted\n", - "Cancel\n", - "Submit feedback\n", - "Saved searches\n", - "Use saved searches to filter your results more quickly\n", - "Cancel\n", - "Create saved search\n", - "Sign in\n", - "Sign up\n", - "Appearance settings\n", - "Resetting focus\n", - "You signed in with another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You signed out in another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You switched accounts on another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "Dismiss alert\n", - "Fikriraihan\n", - "Follow\n", - "Overview\n", - "Repositories\n", - "34\n", - "Projects\n", - "0\n", - "Packages\n", - "0\n", - "Stars\n", - "0\n", - "More\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "Stars\n", - "Fikriraihan\n", - "Follow\n", - "Fikriraihan\n", - "Follow\n", - "Block or Report\n", - "Block or report Fikriraihan\n", - "Report abuse\n", - "Contact GitHub support about this user’s behavior.\n", - " Learn more about\n", - "reporting abuse\n", - ".\n", - "Report abuse\n", - "Overview\n", - "Repositories\n", - "34\n", - "Projects\n", - "0\n", - "Packages\n", - "0\n", - "Stars\n", - "0\n", - "More\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "Stars\n", - "Pinned\n", - "Loading\n", - "2024-coding-challenge\n", - "2024-coding-challenge\n", - "Public\n", - "Repository for Coding Challenge 2024\n", - "JavaScript\n", - "ChatGPT\n", - "ChatGPT\n", - "Public\n", - "TypeScript\n", - "fikri-3d-portofolio\n", - "fikri-3d-portofolio\n", - "Public\n", - "JavaScript\n", - "nextjs-dashboard\n", - "nextjs-dashboard\n", - "Public\n", - "Nextjs-dashboard course\n", - "TypeScript\n", - "nextjs-postgre\n", - "nextjs-postgre\n", - "Public\n", - "TypeScript\n", - "imaginify\n", - "imaginify\n", - "Public\n", - "TypeScript\n", - "Something went wrong, please refresh the page to try again.\n", - "If the problem persists, check the\n", - "GitHub status page\n", - "or\n", - "contact support\n", - ".\n", - "Uh oh!\n", - "There was an error while loading.\n", - "Please reload this page\n", - ".\n", - "Footer\n", - "© 2025 GitHub, Inc.\n", - "Footer navigation\n", - "Terms\n", - "Privacy\n", - "Security\n", - "Status\n", - "Docs\n", - "Contact\n", - "Manage cookies\n", - "Do not share my personal information\n", - "You can’t perform that action at this time.\n" - ] - } - ], + "outputs": [], "source": [ "print(user_prompt_for(github))" ] }, { "cell_type": "code", - "execution_count": 18, - "id": "da2a2c62-0ff4-4e4b-a1a1-774b47f848a0", - "metadata": {}, - "outputs": [], - "source": [ - "messages = [\n", - " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", - " {\"role\": \"user\", \"content\": \"tell me a fruit that has red color\"}\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "204b9b40-cfd9-46f4-a954-efee75fc3d79", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Oh, I don’t know, how about the classic red apple? Or maybe you were hoping for something more exotic, like a blood orange? There’s also the ever-popular strawberry. The options are endless! 
What’s next, a fruit quiz?\n" - ] - } - ], - "source": [ - "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", - "print(response.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "0a4a376a-8c20-4fd3-91ad-25511df76292", - "metadata": {}, - "outputs": [], - "source": [ - "def messages_for(website):\n", - " return [\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "11bc74b0-7ca7-40da-81cc-84b2dd04780b", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'role': 'system',\n", - " 'content': 'You are a skilled GitHub profile analyzer. Your job is to take the provided GitHub profile or repository URL and generate a clear, structured summary covering these points: 1️⃣ **Profile Summary** - Username - Bio (if available) - Total public repositories - Total followers - Total stars received (sum across repos) - Top programming languages (by repo count) 2️⃣ **Repository Highlights** (top 3 by stars or activity) For each: - Repository name - Description - Primary language - Star count - Last updated date - Notable technologies or frameworks used 3️⃣ **Overall Assessment** - What does this user specialize in? - Are they more focused on personal projects or collaborations? - Any standout strengths or skills you notice? 4️⃣ **Recommendations** - Suggest one area or technology they could explore next to grow. - Suggest one improvement to make their GitHub profile more appealing. Be concise, insightful, and encourage the user’s growth. If some data is missing, state it clearly instead of guessing.'},\n", - " {'role': 'user',\n", - " 'content': 'You are looking at a github named Fikriraihan · GitHub\\nThe contents of this github is as follows; please provide a summary of this website in markdown.Skip to content\\nNavigation Menu\\nToggle navigation\\nSign in\\nAppearance settings\\nProduct\\nGitHub Copilot\\nWrite better code with AI\\nGitHub Models\\nNew\\nManage and compare prompts\\nGitHub Advanced Security\\nFind and fix vulnerabilities\\nActions\\nAutomate any workflow\\nCodespaces\\nInstant dev environments\\nIssues\\nPlan and track work\\nCode Review\\nManage code changes\\nDiscussions\\nCollaborate outside of code\\nCode Search\\nFind more, search less\\nExplore\\nWhy GitHub\\nAll features\\nDocumentation\\nGitHub Skills\\nBlog\\nSolutions\\nBy company size\\nEnterprises\\nSmall and medium teams\\nStartups\\nNonprofits\\nBy use case\\nDevSecOps\\nDevOps\\nCI/CD\\nView all use cases\\nBy industry\\nHealthcare\\nFinancial services\\nManufacturing\\nGovernment\\nView all industries\\nView all solutions\\nResources\\nTopics\\nAI\\nDevOps\\nSecurity\\nSoftware Development\\nView all\\nExplore\\nLearning Pathways\\nEvents & Webinars\\nEbooks & Whitepapers\\nCustomer Stories\\nPartners\\nExecutive Insights\\nOpen Source\\nGitHub Sponsors\\nFund open source developers\\nThe ReadME Project\\nGitHub community articles\\nRepositories\\nTopics\\nTrending\\nCollections\\nEnterprise\\nEnterprise platform\\nAI-powered developer platform\\nAvailable add-ons\\nGitHub Advanced Security\\nEnterprise-grade security features\\nCopilot for business\\nEnterprise-grade AI features\\nPremium Support\\nEnterprise-grade 24/7 support\\nPricing\\nSearch or jump to...\\nSearch code, repositories, users, issues, pull 
requests...\\nSearch\\nClear\\nSearch syntax tips\\nProvide feedback\\nWe read every piece of feedback, and take your input very seriously.\\nInclude my email address so I can be contacted\\nCancel\\nSubmit feedback\\nSaved searches\\nUse saved searches to filter your results more quickly\\nCancel\\nCreate saved search\\nSign in\\nSign up\\nAppearance settings\\nResetting focus\\nYou signed in with another tab or window.\\nReload\\nto refresh your session.\\nYou signed out in another tab or window.\\nReload\\nto refresh your session.\\nYou switched accounts on another tab or window.\\nReload\\nto refresh your session.\\nDismiss alert\\nFikriraihan\\nFollow\\nOverview\\nRepositories\\n34\\nProjects\\n0\\nPackages\\n0\\nStars\\n0\\nMore\\nOverview\\nRepositories\\nProjects\\nPackages\\nStars\\nFikriraihan\\nFollow\\nFikriraihan\\nFollow\\nBlock or Report\\nBlock or report Fikriraihan\\nReport abuse\\nContact GitHub support about this user’s behavior.\\n Learn more about\\nreporting abuse\\n.\\nReport abuse\\nOverview\\nRepositories\\n34\\nProjects\\n0\\nPackages\\n0\\nStars\\n0\\nMore\\nOverview\\nRepositories\\nProjects\\nPackages\\nStars\\nPinned\\nLoading\\n2024-coding-challenge\\n2024-coding-challenge\\nPublic\\nRepository for Coding Challenge 2024\\nJavaScript\\nChatGPT\\nChatGPT\\nPublic\\nTypeScript\\nfikri-3d-portofolio\\nfikri-3d-portofolio\\nPublic\\nJavaScript\\nnextjs-dashboard\\nnextjs-dashboard\\nPublic\\nNextjs-dashboard course\\nTypeScript\\nnextjs-postgre\\nnextjs-postgre\\nPublic\\nTypeScript\\nimaginify\\nimaginify\\nPublic\\nTypeScript\\nSomething went wrong, please refresh the page to try again.\\nIf the problem persists, check the\\nGitHub status page\\nor\\ncontact support\\n.\\nUh oh!\\nThere was an error while loading.\\nPlease reload this page\\n.\\nFooter\\n© 2025 GitHub,\\xa0Inc.\\nFooter navigation\\nTerms\\nPrivacy\\nSecurity\\nStatus\\nDocs\\nContact\\nManage cookies\\nDo not share my personal information\\nYou can’t perform that action at this time.'}]" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "messages_for(github)" ] }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "e64f497f-3742-4d70-9e15-29d1974b3361", "metadata": {}, "outputs": [], @@ -711,28 +198,17 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "95d0938d-0b26-4253-94a6-ac9240e7a8c9", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'# GitHub Profile Summary for Fikriraihan\\n\\n### 1️⃣ Profile Summary\\n- **Username:** Fikriraihan\\n- **Bio:** (No bio available)\\n- **Total public repositories:** 34\\n- **Total followers:** 0 (not indicated)\\n- **Total stars received:** 0\\n- **Top programming languages (by repo count):**\\n - JavaScript\\n - TypeScript\\n\\n### 2️⃣ Repository Highlights\\n**Top 3 repositories by activity:**\\n\\n1. **Repository Name:** 2024-coding-challenge\\n - **Description:** Repository for Coding Challenge 2024\\n - **Primary Language:** JavaScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** JavaScript\\n\\n2. **Repository Name:** ChatGPT\\n - **Description:** (No description provided)\\n - **Primary Language:** TypeScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** TypeScript\\n\\n3. 
**Repository Name:** fikri-3d-portofolio\\n - **Description:** (No description provided)\\n - **Primary Language:** JavaScript\\n - **Star Count:** 0\\n - **Last Updated Date:** (Not available)\\n - **Notable Technologies or Frameworks Used:** JavaScript\\n\\n### 3️⃣ Overall Assessment\\n- **What does this user specialize in?** \\n - Based on the repository languages, Fikriraihan appears to specialize in JavaScript and TypeScript.\\n\\n- **Are they more focused on personal projects or collaborations?** \\n - The profile indicates a focus on personal projects given the lack of followers and collaborations apparent from the repositories.\\n\\n- **Any standout strengths or skills you notice?** \\n - The presence of JavaScript and TypeScript projects suggests proficiency in web development, specifically in relation to modern frameworks.\\n\\n### 4️⃣ Recommendations\\n- **One area or technology to explore next to grow:**\\n - Fikriraihan could benefit from exploring backend technologies, such as Node.js or Express, to complement their front-end skills with JavaScript/TypeScript.\\n\\n- **One improvement to make their GitHub profile more appealing:**\\n - Adding a bio and descriptions for each repository would help provide context and showcase their intent and the purpose behind each project, thereby attracting more engagement and potential collaborators.'" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "summarize(\"https://github.com/Fikriraihan\")" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "id": "cd863db3-731a-46d8-ac14-f74f8ae39bd4", "metadata": {}, "outputs": [], @@ -744,66 +220,10 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "id": "70c5c3aa-2c06-460b-9c4f-6465d2c8611c", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "# GitHub Profile Summary for Fikriraihan\n", - "\n", - "### 1️⃣ Profile Summary\n", - "- **Username:** Fikriraihan\n", - "- **Bio:** Not available\n", - "- **Total public repositories:** 34\n", - "- **Total followers:** Not available\n", - "- **Total stars received:** 0\n", - "- **Top programming languages (by repo count):**\n", - " - JavaScript\n", - " - TypeScript\n", - "\n", - "### 2️⃣ Repository Highlights\n", - "Here are the top repositories based on their details:\n", - "\n", - "1. **Repository Name:** 2024-coding-challenge\n", - " - **Description:** Repository for Coding Challenge 2024\n", - " - **Primary Language:** JavaScript\n", - " - **Star Count:** 0\n", - " - **Last Updated Date:** Not available\n", - " - **Notable Technologies/Frameworks Used:** None specified\n", - "\n", - "2. **Repository Name:** ChatGPT\n", - " - **Description:** Not available\n", - " - **Primary Language:** TypeScript\n", - " - **Star Count:** 0\n", - " - **Last Updated Date:** Not available\n", - " - **Notable Technologies/Frameworks Used:** None specified\n", - "\n", - "3. 
**Repository Name:** fikri-3d-portofolio\n", - " - **Description:** Not available\n", - " - **Primary Language:** JavaScript\n", - " - **Star Count:** 0\n", - " - **Last Updated Date:** Not available\n", - " - **Notable Technologies/Frameworks Used:** None specified\n", - "\n", - "### 3️⃣ Overall Assessment\n", - "- **What does this user specialize in?** Fikriraihan specializes in JavaScript and TypeScript, indicating a focus on web development or applications that utilize these languages.\n", - "- **Are they more focused on personal projects or collaborations?** The presence of multiple repositories suggests a mix of personal projects. There is no indication of collaboration, as there are no mentions of contributions to external repositories.\n", - "- **Any standout strengths or skills you notice?** The variety of repositories shows an interest in different coding challenges and portfolio projects. However, the lack of stars suggests that the projects may not yet attract a significant audience.\n", - "\n", - "### 4️⃣ Recommendations\n", - "- **Suggest one area or technology they could explore next to grow:** Given the user’s focus on JavaScript and TypeScript, exploring frameworks like React, Vue.js, or even server-side technologies such as Node.js could be beneficial.\n", - "- **Suggest one improvement to make their GitHub profile more appealing:** Adding a bio with a brief introduction and interests, along with project descriptions, would provide more context about the user and enhance engagement with their repositories. Additionally, increasing the visibility of the repositories through more optimization and possibly sharing or collaborating on projects could attract more stars and followers." - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "display_summary(\"https://github.com/Fikriraihan\")" ] From 33205380b621900f1c265f5064404d0004d33c8e Mon Sep 17 00:00:00 2001 From: Ritchy Date: Sun, 25 May 2025 17:11:12 +0100 Subject: [PATCH 05/23] AI stock adviser webscraping notebook --- .../day1- stock adviser webscrap.ipynb | 354 ++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100644 week1/community-contributions/day1- stock adviser webscrap.ipynb diff --git a/week1/community-contributions/day1- stock adviser webscrap.ipynb b/week1/community-contributions/day1- stock adviser webscrap.ipynb new file mode 100644 index 0000000..4872d5d --- /dev/null +++ b/week1/community-contributions/day1- stock adviser webscrap.ipynb @@ -0,0 +1,354 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "2e40e4f0-4f65-4f68-be50-07401959f46e", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fea8f921-7f2f-4942-9f88-cb6eb64ea731", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not 
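The output-clearing patch above removes every saved cell output and resets `execution_count` to `null` by hand. The same cleanup can be scripted; a sketch with the `nbformat` library follows, and `jupyter nbconvert --clear-output --inplace <notebook>` is a command-line alternative.

```python
import nbformat

def clear_outputs(path):
    """Strip outputs and execution counts from a notebook in place (sketch)."""
    nb = nbformat.read(path, as_version=4)
    for cell in nb.cells:
        if cell.cell_type == "code":
            cell.outputs = []
            cell.execution_count = None
    nbformat.write(nb, path)

clear_outputs("week1/community-contributions/day-1-github-information.ipynb")
```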
api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8d90ba3b-e50e-4a7d-820f-e669ea3679ff", + "metadata": {}, + "outputs": [], + "source": [ + "#call open AI\n", + "openai = OpenAI()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "046a59c6-56f5-4a09-89bd-8163075ad643", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "class Website:\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object for a Finance latest news\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " \n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " \n", + " # Find news headlines and content \n", + " news_data = []\n", + " \n", + " # Try different selectors \n", + " news_items = soup.find_all('h3') + soup.find_all('h2')\n", + " \n", + " for item in news_items:\n", + " headline = item.get_text(strip=True)\n", + " if headline and len(headline) > 20: # Filter out short/empty text\n", + " # Try to find content near the headline\n", + " content = \"\"\n", + " parent = item.find_parent()\n", + " if parent:\n", + " # Look for paragraph or summary text\n", + " summary = parent.find('p')\n", + " if summary:\n", + " content = summary.get_text(strip=True)[:300] + \"...\"\n", + " \n", + " news_data.append({'headline': headline, 'content': content})\n", + " \n", + " # Create the text content\n", + " self.text = \"Latest financial news headlines:\\n\\n\"\n", + " \n", + " # Get top 5 headlines with content\n", + " for i, news in enumerate(news_data[:10], 1):\n", + " self.text += f\"{i}. {news['headline']}\\n\"\n", + " if news['content']:\n", + " self.text += f\" Summary: {news['content']}\\n\"\n", + " self.text += \"\\n\"\n", + " \n", + " if not news_data:\n", + " self.text = \"No headlines found. Yahoo Finance structure may have changed.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b5b1c72e-bc74-4ed0-9a64-795ca9bac74d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Title: Yahoo Finance - Stock Market Live, Quotes, Business & Finance News\n", + "Top News:\n", + "Latest financial news headlines:\n", + "\n", + "1. US Risks Losing ‘Reliable Investment’ Status, Allianz GI Manager Says\n", + " Summary: (Bloomberg) -- Inside one of Europe’s biggest asset managers, there’s growing concern that Republican efforts to gut legislation supporting key industries such as clean energy may result in the US losing its status as a destination for investor capital.Most Read from BloombergNY Private School Plead...\n", + "\n", + "2. Why Intempus thinks robots should have a human physiological state\n", + " Summary: Teddy Warner, 19, has always been interested in robotics. 
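Because the news scraper concatenates `find_all('h3')` and `find_all('h2')`, the same story can appear twice when Yahoo renders it at both heading levels. A small hedged helper that drops repeated headlines while preserving order is sketched below; it could be slotted in right after the `news_items` lookup inside the class, and the `limit` default is illustrative.

```python
def dedupe_headlines(news_items, limit=10):
    """Keep the first occurrence of each headline from a list of bs4 tags (sketch)."""
    seen = set()
    unique = []
    for item in news_items:
        headline = item.get_text(strip=True)
        if len(headline) > 20 and headline not in seen:
            seen.add(headline)
            unique.append(item)
        if len(unique) == limit:
            break
    return unique
```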
His family was in the industry, and he says he \"grew up\" working in a machinist shop while in high school. Now Warner is building a robotics company of his own, Intempus, that looks to make robots a bit more human. Intempus is building tech t...\n", + "\n", + "3. Last 24 hours: TechCrunch Disrupt 2025 Early Bird Deals will fly away after today\n", + " Summary: Just 24 hours left to lock in Early Bird pricing for TechCrunch Disrupt 2025 — happening October 27–29 at Moscone West in San Francisco. Save up to $900 on your pass, or bring someone brilliant with you for 90% off their ticket. This deal ends tonight at 11:59 p.m. PT. Grab your Early Bird discount ...\n", + "\n", + "4. 48 hours left: What you won’t want to miss at the 20th TechCrunch Disrupt in October\n", + " Summary: ​​There are just 48 hours left to save up to $900 on your ticket to TechCrunch Disrupt 2025 — and get 90% off the second. After May 25 at 11:59 p.m. PT, Early Bird pricing vanishes — along with your best chance to join 10,000 of tech’s most forward-thinking minds for less. But forget the math for a ...\n", + "\n", + "5. More than a third of Americans say they want an 'adventurous retirement'\n", + " Summary: Retirement is no longer just about rocking chairs, gardening, grandchildren, or afternoons on the golf course....\n", + "\n", + "6. 'Unsustainable fiscal situation': Wall Street braces for more bond market turmoil as Trump tax bill stirs up deficit concerns\n", + " Summary: Surging Treasury yields signal deepening market fears as Trump's tax plan, soaring deficits, and global fiscal turmoil shake investor confidence....\n", + "\n", + "7. Nvidia has lost its shock power to investors, for now\n", + " Summary: Nvidia's quarter may be tougher than normal to assess. Here's why....\n", + "\n", + "8. Nvidia earnings, Trump tariff updates, and the Fed's preferred inflation gauge: What to know this week\n", + " Summary: A quarterly earnings release from Nvidia is set to greet investors in the week ahead as the stock market rally has hit pause....\n", + "\n", + "9. This week in Trumponomics: Bonds spoil the party\n", + " Summary: Trump is heading toward an important victory on tax cuts. Instead of cheering, markets are fretting....\n", + "\n", + "10. Manufacturers could benefit from Trump's 'big, beautiful' bill depending on what they make\n", + " Summary: Advocates for the manufacturing sector have hailed the advancement of Trump's \"big, beautiful bill,\" but at least two provisions in the 1,000-plus-page package could cut that ebullience for some factory owners....\n", + "\n", + "\n" + ] + } + ], + "source": [ + "website = Website(\"https://finance.yahoo.com/topic/latest-news/\")\n", + "\n", + "print(\"Title:\", website.title)\n", + "print(\"Top News:\")\n", + "print(website.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "2c0ac856-b0d8-4b15-8092-71ab3952a0d9", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt\n", + "system_prompt = \"\"\"You are a veteran stock market and finance expert with 50+ years of experience helping investors make safe, steady gains. Your audience is beginners with small amounts to invest (around $100). \n", + "\n", + "**Response Format:**\n", + "1. Start with \"The News Snapshot:\" - Write 3-4 lines summarizing the key financial developments from the provided headlines and summaries, showing you understand the current market situation, start the write up for this with today in the news we see that...\n", + "\n", + "2. 
Give specific stock advice based on the news:\n", + " - What to avoid and why\n", + " - 2-3 specific stock recommendations with ticker symbols\n", + " - Focus only on safe, dividend-paying stocks or clear beneficiaries from the news\n", + "\n", + "3. End with \"The big picture:\" - One sentence explaining the overall market condition\n", + "\n", + "4. Close with \"Your game plan:\" - Simple, actionable advice for their $100 to show how to split it\n", + "\n", + "**Tone & Style:**\n", + "- Talk like a knowledgeable but friendly Wall Street professional advising a beginner\n", + "- Keep it under 200 words total\n", + "- Use simple language, no complex jargon\n", + "- Be direct and practical\n", + "- Focus on capital preservation over quick gains\n", + "- Always relate advice directly to the news headlines provided\n", + "\n", + "**Key Rules:**\n", + "- Only recommend established, safe stocks\n", + "- Always explain WHY based on the news\n", + "- No speculative or meme stocks\n", + "- Emphasize learning over quick profits\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "077acf13-6e37-488f-a7c7-5f301266f57f", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a provide your investment advice for a beginner with $100. \\\n", + "Because it includes finance news or trend, let the advice be based on these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "1c129909-769c-49f0-a84d-85a25972463b", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "2c9f998f-639f-451b-a67e-5a95978ab70d", + "metadata": {}, + "outputs": [], + "source": [ + "def get_advice(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "402b4bb4-fbf4-4930-9cd1-4ede22491fa2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'**The News Snapshot:** Recent headlines reveal rising treasury yields and concerns over the US losing its \"reliable investment\" status, stoking fears of market uncertainty. 
Amidst this backdrop, investors may want to focus on stable, dividend-paying stocks that can weather the storm and provide consistent returns.\\n\\n**Stock Advice:**\\n- **Avoid speculative tech stocks** like Nvidia, which has recently shown volatility and uncertainty in earnings, leading to a potential loss of investor confidence.\\n- **Recommendation #1: Johnson & Johnson (JNJ)** – A well-established healthcare company that pays a reliable dividend, making it a safe bet in uncertain times.\\n- **Recommendation #2: Procter & Gamble (PG)** – Known for its strong brand portfolio and consistent dividend payouts, PG offers stability and resilience against market fluctuations.\\n- **Recommendation #3: Coca-Cola (KO)** – With a history of dividend increases, Coca-Cola remains a staple in many portfolios, providing that defensive position investors need right now.\\n\\n**The big picture:** The market is showing signs of concern, and investors should prioritize capital preservation over chasing quick returns.\\n\\n**Your game plan:** With your $100, consider investing in fractional shares of JNJ, PG, or KO to benefit from their dividends and stability while learning about long-term investing principles.'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_advice(\"https://finance.yahoo.com/topic/latest-news/\")" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "0427753f-6b47-4c36-b68f-0f22abd8a7cd", + "metadata": {}, + "outputs": [], + "source": [ + "def display_fin_advice(url):\n", + " advice_content = get_advice(url) \n", + " display(Markdown(advice_content))" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "1d26e64f-fdd0-4492-9b20-a54847b11139", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "The News Snapshot: Today in the news, we see that concerns are rising around the US potentially losing its appeal as a reliable investment destination due to political actions, particularly in clean energy. Rising Treasury yields and fiscal uncertainty, stemming from tax policies, are causing unease in the markets. Generally, investors are on alert due to potential repercussions for sectors reliant on government support and tax reform.\n", + "\n", + "Specific Stock Advice:\n", + "- I advise avoiding high-growth tech stocks like **Nvidia (NVDA)** for now, as their recent earnings show volatility and uncertainty. \n", + "- Instead, consider established dividend-paying stocks like **Johnson & Johnson (JNJ)** and **Procter & Gamble (PG)**. Both companies are less sensitive to political changes and provide steady dividends, making them safer bets during turbulent times.\n", + "- Another option is **3M Company (MMM)**, which has a strong history of dividend payments and benefits from potential manufacturing boosts tied to new legislation.\n", + "\n", + "The big picture: The market is navigating through uncertainties, particularly around fiscal policy and investment confidence.\n", + "\n", + "Your game plan: Split your $100 into three parts: $40 in Johnson & Johnson, $40 in Procter & Gamble, and keep $20 in cash for future opportunities or to cover transaction fees. This balanced approach aims for safety and steady growth." 
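As an aside: the `get_advice` helper above waits for the complete response before returning anything. For a more interactive feel in the notebook, the same call can be streamed token by token. The sketch below is hypothetical (not part of the original cells) and assumes the `openai` client, the `Website` class and the `messages_for` helper defined earlier in this notebook.

```python
# Hypothetical streaming variant of get_advice (a sketch, not the notebook's code).
def get_advice_streaming(url):
    website = Website(url)
    stream = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages_for(website),
        stream=True,  # ask the API to return incremental chunks
    )
    advice = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content or ""
        advice += delta
        print(delta, end="", flush=True)  # show the advice as it arrives
    return advice
```

The assembled string could still be passed to `display(Markdown(...))` once streaming finishes, just as `display_fin_advice` does.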
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_fin_advice(\"https://finance.yahoo.com/topic/latest-news/\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7567571d-b4c7-41be-9fd0-d65ae533a252", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 2d6ec3eed7b132b5c47375fcfc277f30814d4f7f Mon Sep 17 00:00:00 2001 From: shikhidvaja Date: Sun, 25 May 2025 21:48:55 +0530 Subject: [PATCH 06/23] job recommendation based on the resume content. Suggests the suitable job role to apply for and provide the links from job sites based on location and sites user wants to search for. --- .../resume_based_job_recommender.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 week1/community-contributions/resume_based_job_recommender.py diff --git a/week1/community-contributions/resume_based_job_recommender.py b/week1/community-contributions/resume_based_job_recommender.py new file mode 100644 index 0000000..7daef4f --- /dev/null +++ b/week1/community-contributions/resume_based_job_recommender.py @@ -0,0 +1,104 @@ +from openai import OpenAI +from dotenv import load_dotenv +import os +import pypdf + +class ResumeBasedJobRecommendation: + def __init__(self, path: str): + self.resume_path = path + + # method to read the content from the resume and use it for the user prompt + def read_resume(self): + """method to read the content from the resume and use it for the user prompt. + + Returns: + content (str): returns the content of the resume. + """ + try: + pdfreader = pypdf.PdfReader(self.resume_path) + data = "" + for page_number in range(pdfreader.get_num_pages()): + page = pdfreader.pages[page_number] + data += page.extract_text() + except FileNotFoundError as e: + print(f"Issue with the resume file path: {str(e)}") + return + except Exception as e: + print(f"Couldn't able to parse the pdf : {str(e)}") + return + return data + + # + def message_prompt(self, data: str, job_sites: list, location: str): + """method suggests the appropriate job roles and provides the search link from job sites based on users input of resume data, job boards and location. + + Args: + data (str): resume content for user prompt + job_sites (list): job searching sites for user prompt + location (str): location of job search + + Returns: + content (str): Provides summary of resume with suggested job roles and links using gpt 4.o model. + """ + self.message = [ + {"role": "system", + "content": "You are an assistant that analysizes the resume data and summarize it. \ + Based on the summarization, you suggest the appropriate job roles \ + and provide the appropriate job search links for each suggested roles from the job sites based on filtering by the \ + location provided. 
" + }, + { + "role": "user", + "content": f"Below is my resume content, kindly look for the appropriate job openings in \ + {job_sites} for location {location}:\n{data}" + }] + self.response = openai.chat.completions.create(model='gpt-4o-mini', messages=self.message) + return self.response.choices[0].message.content + + +if __name__ == '__main__': + # load the api key from .env and check if it is valid. + load_dotenv() + + api_key = os.getenv('OPENAI_API_KEY') + + if api_key is None: + print("No api key was found.") + exit() + elif not api_key.startswith('sk-proj-'): + print("api key is present but it is not matching with the openai api key pattern starting with sk-proj-. Please check it.") + exit() + elif api_key.strip() != api_key: + print("api key is good but it seems it has the spaces at starting or the end. Please check and remove it.") + exit() + else: + print("api key is found and it looks good.") + + openai = OpenAI() + + #Provide the valid resume path + file_path = input("Kindly enter the resume path:\n") + if not file_path: + print("Resume path is not provided. Kindly provide the valid path.") + exit() + + obj = ResumeBasedJobRecommendation(file_path) + data = obj.read_resume() + + if not data: + pass + else: + #provide the input for the job sites to search and valid job location + job_sites = input("Enter the job sites with space between each other: ") + if not job_sites: + print("Didn't provided the job sites to search for. Going with Linkedin, Indeed, Glassdoor and Naukri as defaults.") + job_sites = ['LinkedIn', 'Indeed', 'Naukri', 'Glassdoor'] + else: + job_sites = job_sites.split(' ') + location = input("Enter the job location:") + if not location: + print("No location has been provided. Default will consider as United States.") + location = 'United States' + + response = obj.message_prompt(data, job_sites, location) + print(response) From 7fc4aa97539dd8e298b43289753b84c94dd16e4d Mon Sep 17 00:00:00 2001 From: Jack McInerney Date: Mon, 26 May 2025 17:30:08 -0700 Subject: [PATCH 07/23] Add my notebook on adverserial chatting to community-contributions --- .../MyAdverserialChat.ipynb | 330 ++++++++++++++++++ 1 file changed, 330 insertions(+) create mode 100644 community-contributions/mcinerney-adverserial/MyAdverserialChat.ipynb diff --git a/community-contributions/mcinerney-adverserial/MyAdverserialChat.ipynb b/community-contributions/mcinerney-adverserial/MyAdverserialChat.ipynb new file mode 100644 index 0000000..1424900 --- /dev/null +++ b/community-contributions/mcinerney-adverserial/MyAdverserialChat.ipynb @@ -0,0 +1,330 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "70a27b7c-3f3c-4d82-bdea-381939ce98bd", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "source": [ + "# My Adverserial Conversation\n", + "J. McInerney, 26 May 2025\n", + "I am taking some cells from the Week2, Day 1 notebook and modifying them so I can have an adverserial conversation between OpenAI and a local LLM (gemma3:12b). First I will just reimplement what Ed did in the Week2, Day 1 notebook. Then I will try a deeper conversation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ec14834-4cf2-4f1d-9128-4ddad7b91804", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "#import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98618ab4-075f-438c-b85b-d146e5299a87", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "95e69172-4601-4eb0-a7af-19abebd4bf56", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "markdown", + "id": "98f47886-71ae-4b41-875a-1b97a5eb0ddc", + "metadata": {}, + "source": [ + "## An adversarial conversation between Chatbots..\n", + "\n", + "You're already familar with prompts being organized into lists like:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"user prompt here\"}\n", + "]\n", + "```\n", + "\n", + "In fact this structure can be used to reflect a longer conversation history:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "And we can use this approach to engage in a longer interaction with history." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74125f8b-042e-4236-ad3d-6371ce5a1493", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Gemma3:12b\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "local_model = 'gemma3:12b'\n", + "\n", + "gpt_system = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "local_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other person says, or find common ground. 
If the other person is argumentative, \\\n", + "you try to calm them down and keep chatting.\"\n", + "\n", + "gpt_messages = [\"Hi there\"]\n", + "local_messages = [\"Hi\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f94d9232-f82a-4eab-9d89-bd9815f260f0", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, local in zip(gpt_messages, local_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": local})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6445453-31be-4c63-b350-957b7d99b6f4", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fc51f776-f6e2-41af-acb5-cbdf03fdf530", + "metadata": {}, + "outputs": [], + "source": [ + "basellm = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "def call_local():\n", + " messages = []\n", + " for gpt, local_message in zip(gpt_messages, local_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": local_message})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " \n", + " completion = basellm.chat.completions.create(\n", + " model=local_model,\n", + " messages=messages\n", + " )\n", + " \n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16fd90cb-ebfd-4a4f-ae49-70568ae8fbb1", + "metadata": {}, + "outputs": [], + "source": [ + "call_local()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "429eeefb-f080-4a57-8f2d-ff3d4237afab", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ce847ed-521d-4be5-895b-44088de499e1", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there\"]\n", + "local_messages = [\"Hi\"]\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"local:\\n{local_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " local_next = call_local()\n", + " print(f\"local:\\n{local_next}\\n\")\n", + " local_messages.append(local_next)" + ] + }, + { + "cell_type": "markdown", + "id": "d3b1707a-2903-4529-b6eb-95a874a14e78", + "metadata": {}, + "source": [ + "## Let's try a more thoughful conversation\n", + "The two chatbots will engage in a friendly discussion on whether the US should have entered World War I in 1917. They are both open minded so they can learn from each other." 
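Before running the longer debate below, one detail of the `call_local` helper above is worth flagging: it builds its message list without ever including `local_system`, so the local model never actually receives its system prompt. The sketch that follows is a hypothetical variant (not part of the original notebook) that adds the system prompt and talks to Ollama through its native Python client rather than the OpenAI-compatible endpoint; it assumes the `ollama` package is installed and that `gemma3:12b` has already been pulled.

```python
import ollama  # assumes `pip install ollama` and `ollama pull gemma3:12b`

# Hypothetical alternative to call_local: include the system prompt and use
# the ollama client directly instead of the OpenAI-compatible endpoint.
def call_local_via_ollama():
    messages = [{"role": "system", "content": local_system}]
    for gpt, local_message in zip(gpt_messages, local_messages):
        messages.append({"role": "user", "content": gpt})
        messages.append({"role": "assistant", "content": local_message})
    messages.append({"role": "user", "content": gpt_messages[-1]})
    response = ollama.chat(model=local_model, messages=messages)
    return response["message"]["content"]
```

Whether omitting the system prompt was intentional is unclear from the notebook, so treat this purely as an option to experiment with.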
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abb733bf-a5d3-4718-8741-8e8abfd3a088", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Gemma3:12b\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_system = \"You are a chatbot who believes it was a mistake for the US to enter World War I; \\\n", + "you are open to other arguments, but you feel the evidence suggests the world would have been \\\n", + "better off if the US had stayed isolationalist. You consider counter arguments but also express \\\n", + "your own arguments.\"\n", + "\n", + "local_system = \"You are a chatbot who believes the US made the right decision entering World War I in \\\n", + "1917. Overall, the world is a better place for it. You are open minded but believe the evidence \\\n", + "supports this view. You consider counter arguments but also express your own arguments.\"\n", + "\n", + "gpt_messages = [\"It was such a mistake for the US to enter WWI\"]\n", + "local_messages = [\"Why do you say that?\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "569e18a3-25cd-46d5-8edb-713ff149d008", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"local:\\n{local_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " local_next = call_local()\n", + " print(f\"local:\\n{local_next}\\n\")\n", + " local_messages.append(local_next)" + ] + }, + { + "cell_type": "markdown", + "id": "d29df7da-eaa3-4c98-b913-05185b62cffe", + "metadata": {}, + "source": [ + "## Conclusion\n", + "I am amazed at how insightful this conversation was. Not only did they explore all the pros and cons, they began applying those lessons to current day foreign policy. This looks like a very good way to explore a topic. 
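Since the conclusion above suggests this is a good way to explore a topic, the round-trip loop could be wrapped in a small reusable helper. This is a hypothetical sketch (not in the original notebook); it relies on `call_gpt` and `call_local` reading the module-level `gpt_messages` / `local_messages` lists, and the two system prompts would still need to be redefined separately for each new topic.

```python
# Hypothetical helper to rerun the two-bot exchange on a fresh pair of openers.
def explore_topic(opening_gpt, opening_local, rounds=5):
    global gpt_messages, local_messages
    gpt_messages = [opening_gpt]
    local_messages = [opening_local]
    print(f"GPT:\n{gpt_messages[0]}\n")
    print(f"local:\n{local_messages[0]}\n")
    for _ in range(rounds):
        gpt_next = call_gpt()
        print(f"GPT:\n{gpt_next}\n")
        gpt_messages.append(gpt_next)
        local_next = call_local()
        print(f"local:\n{local_next}\n")
        local_messages.append(local_next)

# Example: explore_topic("It was such a mistake for the US to enter WWI", "Why do you say that?")
```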
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b486b2d6-40da-4745-8cbf-1afd2be22caa", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 776478f6e87578f5c13564a3a680efe4113f2ad4 Mon Sep 17 00:00:00 2001 From: sharathir Date: Wed, 28 May 2025 11:01:32 +0530 Subject: [PATCH 08/23] Adding Week 2 Day 4 Ticket Pricing using tools with Ollama by sharathir --- .../Wk2Day4_Ollama_Tools_Sharathir.ipynb | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 week2/community-contributions/Wk2Day4_Ollama_Tools_Sharathir.ipynb diff --git a/week2/community-contributions/Wk2Day4_Ollama_Tools_Sharathir.ipynb b/week2/community-contributions/Wk2Day4_Ollama_Tools_Sharathir.ipynb new file mode 100644 index 0000000..213d50b --- /dev/null +++ b/week2/community-contributions/Wk2Day4_Ollama_Tools_Sharathir.ipynb @@ -0,0 +1,195 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "989184c3-676b-4a68-8841-387ba0776e1d", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0ac9605-d28a-4c19-97e3-1dd3f9ac99ba", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. Respond to greetings and general conversation politely.\"\n", + "system_message += \"Always be accurate. 
If you don't know the answer, say so.\"\n", + "system_message += \"When a user asks for information that requires external data or action, use the available tools to get that information Specifically\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "533e6edf-454a-493d-b0a7-dbc29a5f3930", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = ollama.chat(model=\"llama3.2\", messages=messages)\n", + " return response['message']['content']\n", + "\n", + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac22d421-a241-4c1f-bac4-db2150099ecc", + "metadata": {}, + "outputs": [], + "source": [ + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a0381b1-375c-44ac-8757-2fdde2c76541", + "metadata": {}, + "outputs": [], + "source": [ + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce5a7fd0-1ce1-4b53-873e-f55d1e39d847", + "metadata": {}, + "outputs": [], + "source": [ + "#tools = [{\"type\": \"function\", \"function\": price_function}]\n", + "tools = [\n", + " {\n", + " \"type\":\"function\",\n", + " \"function\":{\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. 
Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\"\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " },\n", + " },\n", + " }\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "06eab709-3f05-4697-a6a8-5f5bc1f442a5", + "metadata": {}, + "outputs": [], + "source": [ + "def handle_tool_call(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = tool_call.function.arguments\n", + " city = arguments.get('destination_city')\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " # \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d7f9af23-0683-40c3-a70b-0a385754688c", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = ollama.chat(model=\"llama3.2\", messages=messages,tools=tools)\n", + " if response['message'].get('tool_calls'):\n", + " message = response['message']\n", + " response, city = handle_tool_call(message)\n", + " messages.append(message)\n", + " messages.append(response)\n", + " response = ollama.chat(model=\"llama3.2\", messages=messages)\n", + " \n", + " return response['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcfa39e2-92ce-48df-b735-f9bbfe638c81", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f5044e9-0ae8-4d88-a22f-d1180ab52434", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 9b85a94833d21ea0c9d53ea92170ace7dba01fd9 Mon Sep 17 00:00:00 2001 From: Jayapal Sahadevan Date: Wed, 28 May 2025 23:05:04 +0530 Subject: [PATCH 09/23] Added my contributions to community-contributions --- ...k1-day1-ollama-webpage-summarization.ipynb | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb diff --git a/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb new file mode 100644 index 0000000..27aaabb --- /dev/null +++ b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb @@ -0,0 +1,191 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "4dabb31c-a584-4715-9714-9fc9978c3cb5", + "metadata": {}, + "outputs": [], + "source": [ + "#Get IPL best team" 
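Stepping back to the FlightAI tool-calling notebook above (`Wk2Day4_Ollama_Tools_Sharathir.ipynb`): one subtlety of `handle_tool_call` is that `tool_call.function.arguments` arrives as a plain dict from the Ollama Python client but as a JSON-encoded string from the OpenAI client. A defensive sketch that accepts either form might look like the following; this is a hypothetical variant, not the notebook's actual code.

```python
import json  # already imported at the top of that notebook

# Hypothetical, client-agnostic version of handle_tool_call (sketch only).
def handle_tool_call(message):
    tool_call = message.tool_calls[0]
    arguments = tool_call.function.arguments
    if isinstance(arguments, str):          # OpenAI returns a JSON string
        arguments = json.loads(arguments)   # Ollama already returns a dict
    city = arguments.get("destination_city")
    price = get_ticket_price(city)
    response = {
        "role": "tool",
        "content": json.dumps({"destination_city": city, "price": price}),
    }
    return response, city
```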
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "3bb88086-ea9c-4766-9baf-a57bb69c3202", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "9dc24243-d20a-48aa-b90b-26ef90233e22", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "cb35e3d1-8733-4931-8744-9c3754793161", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "63d62eb3-3255-4046-863e-d866a833d1a6", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "409a70a6-331a-4ea4-ab8d-7a46fffc70d7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "How about \"The A-Team 2.0\"? Because clearly, you’re aiming for a sequel that’s already better than the original. Or maybe \"The Not-So-Secret League of Awesome\"? That one’s a real conversation starter! What vibe are you going for?\n", + "[{'role': 'system', 'content': 'You are an assistant that analyzes the contents of a cric info website and provides a short summary of best team in IPL. Respond in markdown.'}, {'role': 'user', 'content': '\\n Get page title\\n'}]\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Best Team in IPL History\n", + "\n", + "The Indian Premier League (IPL) has seen various teams competing for the title since its inception in 2008. Some of the most successful teams in IPL history include:\n", + "\n", + "1. **Mumbai Indians**: They have clinched the IPL trophy a record five times (2013, 2015, 2017, 2019, 2020) and are known for their strong squad and strategic gameplay.\n", + "\n", + "2. 
**Chennai Super Kings**: With four titles (2010, 2011, 2018, 2021), the CSK has consistently been one of the top teams, led by the experienced MS Dhoni.\n", + "\n", + "3. **Kolkata Knight Riders**: They have won the championship twice (2012, 2014) and are recognized for their fan base and competitive spirit.\n", + "\n", + "Overall, the Mumbai Indians are often considered the best team in IPL history due to their multiple championships and consistent performance over the years." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Step 1: Create your prompts\n", + "system_prompt = \"You are an assistant that analyzes the contents of a cric info website \\\n", + "and provides a short summary of best team in IPL. \\\n", + "Respond in markdown.\"\n", + "\n", + "user_prompt = \"\"\"\n", + " Get page title\n", + "\"\"\"\n", + "\n", + "# Step 2: Make the messages list\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"Team name\"}\n", + "]\n", + "\n", + "# Step 3: Call OpenAI\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}]\n", + "\n", + "webUrl = \"https://www.google.com\"\n", + "print(messages_for(webUrl))\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "# Step 4: print the result\n", + "summary = summarize(webUrl)\n", + "display(Markdown(summary))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 30d1d58f07a380ba980f3b803f1babb71053e547 Mon Sep 17 00:00:00 2001 From: Jayapal Sahadevan Date: Wed, 28 May 2025 23:43:45 +0530 Subject: [PATCH 10/23] Added my contributions to community-contributions --- ...k1-day1-ollama-webpage-summarization.ipynb | 53 +++---------------- 1 file changed, 7 insertions(+), 46 deletions(-) diff --git a/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb index 27aaabb..95e6c32 100644 --- a/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb +++ b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb @@ -12,7 +12,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "3bb88086-ea9c-4766-9baf-a57bb69c3202", "metadata": {}, "outputs": [], @@ -27,18 +27,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "9dc24243-d20a-48aa-b90b-26ef90233e22", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "API key found and looks good so far!\n" - ] - } - ], + "outputs": [], "source": [ 
"load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", @@ -57,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "cb35e3d1-8733-4931-8744-9c3754793161", "metadata": {}, "outputs": [], @@ -67,7 +59,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "63d62eb3-3255-4046-863e-d866a833d1a6", "metadata": {}, "outputs": [], @@ -89,41 +81,10 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "409a70a6-331a-4ea4-ab8d-7a46fffc70d7", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "How about \"The A-Team 2.0\"? Because clearly, you’re aiming for a sequel that’s already better than the original. Or maybe \"The Not-So-Secret League of Awesome\"? That one’s a real conversation starter! What vibe are you going for?\n", - "[{'role': 'system', 'content': 'You are an assistant that analyzes the contents of a cric info website and provides a short summary of best team in IPL. Respond in markdown.'}, {'role': 'user', 'content': '\\n Get page title\\n'}]\n" - ] - }, - { - "data": { - "text/markdown": [ - "# Best Team in IPL History\n", - "\n", - "The Indian Premier League (IPL) has seen various teams competing for the title since its inception in 2008. Some of the most successful teams in IPL history include:\n", - "\n", - "1. **Mumbai Indians**: They have clinched the IPL trophy a record five times (2013, 2015, 2017, 2019, 2020) and are known for their strong squad and strategic gameplay.\n", - "\n", - "2. **Chennai Super Kings**: With four titles (2010, 2011, 2018, 2021), the CSK has consistently been one of the top teams, led by the experienced MS Dhoni.\n", - "\n", - "3. **Kolkata Knight Riders**: They have won the championship twice (2012, 2014) and are recognized for their fan base and competitive spirit.\n", - "\n", - "Overall, the Mumbai Indians are often considered the best team in IPL history due to their multiple championships and consistent performance over the years." 
- ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Step 1: Create your prompts\n", "system_prompt = \"You are an assistant that analyzes the contents of a cric info website \\\n", From 06b14a37b6753bc323281fb7b2411b6166eb050e Mon Sep 17 00:00:00 2001 From: Jayapal Sahadevan Date: Thu, 29 May 2025 00:06:39 +0530 Subject: [PATCH 11/23] Added my contributions to community-contributions - removed comment --- .../week1-day1-ollama-webpage-summarization.ipynb | 2 -- 1 file changed, 2 deletions(-) diff --git a/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb index 95e6c32..3df7751 100644 --- a/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb +++ b/week1/community-contributions/week1-day1-ollama-webpage-summarization.ipynb @@ -35,8 +35,6 @@ "load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", "\n", - "# Check the key\n", - "\n", "if not api_key:\n", " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", "elif not api_key.startswith(\"sk-proj-\"):\n", From 0714e7d1d973937ec696eb39b84e5f43be90bc3c Mon Sep 17 00:00:00 2001 From: armangoudarzi91 Date: Wed, 28 May 2025 12:50:14 -0600 Subject: [PATCH 12/23] Add my notebook to community-contributions --- .../Day1-finance-journal-summarizer.ipynb | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 week1/community-contributions/Day1-finance-journal-summarizer.ipynb diff --git a/week1/community-contributions/Day1-finance-journal-summarizer.ipynb b/week1/community-contributions/Day1-finance-journal-summarizer.ipynb new file mode 100644 index 0000000..cffa355 --- /dev/null +++ b/week1/community-contributions/Day1-finance-journal-summarizer.ipynb @@ -0,0 +1,127 @@ +{ + "cells": [ + { + "metadata": {}, + "cell_type": "code", + "source": [ + "import os, textwrap, time, requests\n", + "from bs4 import BeautifulSoup\n", + "from openai import OpenAI\n", + "from dotenv import load_dotenv\n", + "from urllib.parse import urljoin\n", + "\n", + "# ------------------ ENV & OpenAI ------------------\n", + "load_dotenv(override=True)\n", + "openai = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n", + "\n", + "UA = (\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n", + " \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117 Safari/537.36\")\n", + "BASE_URL = \"https://www.cambridge.org\"\n", + "JFQA_URL = f\"{BASE_URL}/core/journals/journal-of-financial-and-quantitative-analysis/latest-issue\"\n", + "\n", + "# ------------------ Helpers ------------------\n", + "def fetch_latest_issue(url: str) -> list[dict]:\n", + " \"\"\"Return unique {title, link} dicts for each research article.\"\"\"\n", + " soup = BeautifulSoup(\n", + " requests.get(url, headers={\"User-Agent\": UA}, timeout=30).text,\n", + " \"html.parser\"\n", + " )\n", + "\n", + " anchors = soup.find_all(\"a\", href=lambda h: h and \"/article/\" in h)\n", + " seen, articles = set(), []\n", + " for a in anchors:\n", + " href = a[\"href\"].split(\"?\")[0] # strip tracking params\n", + " if href in seen: # de‑duplicate\n", + " continue\n", + " seen.add(href)\n", + " title = a.get_text(\" \", strip=True)\n", + " full = urljoin(BASE_URL, href)\n", + " articles.append({\"title\": title, \"link\": full})\n", + " print(f\"Found {len(articles)} unique article links.\")\n", + " return articles\n", + "\n", + 
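"# Hypothetical helper (not in the original cell): a quick sanity check of\n",
+ "# fetch_latest_issue before wiring it into the full summarisation pipeline.\n",
+ "def preview_latest_issue(n: int = 3) -> None:\n",
+ "    for art in fetch_latest_issue(JFQA_URL)[:n]:\n",
+ "        print(art[\"title\"], \"->\", art[\"link\"])\n",
+ "\n",
+ 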
"def fetch_article_details(link: str) -> dict:\n", + " soup = BeautifulSoup(\n", + " requests.get(link, headers={\"User-Agent\": UA}, timeout=30).text,\n", + " \"html.parser\"\n", + " )\n", + "\n", + " # abstract\n", + " abs_tag = soup.find(\"div\", class_=\"abstract\")\n", + " abstract = abs_tag.get_text(\" \", strip=True) if abs_tag else \"N/A\"\n", + "\n", + " # publication date (meta is most reliable)\n", + " meta_date = soup.find(\"meta\", attrs={\"name\": \"citation_publication_date\"})\n", + " pub_date = meta_date[\"content\"] if meta_date else \"N/A\"\n", + "\n", + " # authors (multiple tags)\n", + " authors = [m[\"content\"] for m in soup.find_all(\"meta\",\n", + " attrs={\"name\": \"citation_author\"})]\n", + " authors_str = \", \".join(authors) or \"N/A\"\n", + "\n", + " return {\"abstract\": abstract, \"pub_date\": pub_date, \"authors\": authors_str}\n", + "\n", + "def summarise(txt: str) -> str:\n", + " prompt = (\"Summarise the following finance‑paper abstract in 2‑3 sentences, \"\n", + " \"mentioning the question, method, and main finding.\\n\\n\"\n", + " f\"Abstract:\\n{txt}\")\n", + " try:\n", + " rsp = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=[\n", + " {\"role\": \"system\",\n", + " \"content\": \"You are a helpful finance research assistant.\"},\n", + " {\"role\": \"user\", \"content\": prompt}],\n", + " temperature=0.2, max_tokens=120\n", + " )\n", + " return rsp.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " print(f\"⚠️ summarise error → {e}\")\n", + " return \"Summary unavailable.\"\n", + "\n", + "def scrape_jfqa_latest() -> None:\n", + " for art in fetch_latest_issue(JFQA_URL):\n", + " det = fetch_article_details(art[\"link\"])\n", + " if det[\"abstract\"] == \"N/A\":\n", + " print(f\"\\n📘 {art['title']} — no abstract found.\")\n", + " continue\n", + "\n", + " summary = summarise(det[\"abstract\"])\n", + " print(f\"\\n📘 {art['title']}\")\n", + " print(f\" Authors: {det['authors']}\")\n", + " print(f\" Date : {det['pub_date']}\")\n", + " print(f\" Journal: JFQA (Latest Issue)\")\n", + " print(\" Summary:\", textwrap.shorten(summary, width=600, placeholder=\"…\"))\n", + " print(\"-\" * 90)\n", + " time.sleep(1.0) # polite gap between OpenAI calls\n", + "\n", + "if __name__ == \"__main__\":\n", + " scrape_jfqa_latest()\n" + ], + "id": "e20b182f6258f0be", + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 15c2520925b342e8ce7b11a6fcbec85817aac0d6 Mon Sep 17 00:00:00 2001 From: Jeannine Jordan Date: Fri, 30 May 2025 06:15:59 -0400 Subject: [PATCH 13/23] Add community contributions for PR --- ...thical-antibot-async_jeannine-jordan.ipynb | 794 ++++++++++++++++++ ...ver-threaded-scraper_jeannine-jordan.ipynb | 626 ++++++++++++++ ...ls-code-and-UI-image_jeannine-jordan.ipynb | 349 ++++++++ 3 files changed, 1769 insertions(+) create mode 100644 week1/community-contributions/day1_ethical-antibot-async_jeannine-jordan.ipynb create mode 100644 week1/community-contributions/day5_shared-driver-threaded-scraper_jeannine-jordan.ipynb create mode 100644 
week1/community-contributions/week1-EXERCISE_rewrite-internal-tools-code-and-UI-image_jeannine-jordan.ipynb diff --git a/week1/community-contributions/day1_ethical-antibot-async_jeannine-jordan.ipynb b/week1/community-contributions/day1_ethical-antibot-async_jeannine-jordan.ipynb new file mode 100644 index 0000000..70e5cc7 --- /dev/null +++ b/week1/community-contributions/day1_ethical-antibot-async_jeannine-jordan.ipynb @@ -0,0 +1,794 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# YOUR FIRST LAB\n", + "### Please read this section. This is valuable to get you prepared, even if it's a long read -- it's important stuff.\n", + "\n", + "## Your first Frontier LLM Project\n", + "\n", + "Let's build a useful LLM solution - in a matter of minutes.\n", + "\n", + "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", + "\n", + "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", + "\n", + "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", + "\n", + "## If you're new to Jupyter Lab\n", + "\n", + "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", + "\n", + "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", + "\n", + "## If you're new to the Command Line\n", + "\n", + "Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", + "\n", + "## If you'd prefer to work in IDEs\n", + "\n", + "If you're more comfortable in IDEs like VSCode, Cursor or PyCharm, they both work great with these lab notebooks too. \n", + "If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", + "\n", + "## If you'd like to brush up your Python\n", + "\n", + "I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", + "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", + "\n", + "## I am here to help\n", + "\n", + "If you have any problems at all, please do reach out. \n", + "I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) 
\n", + "And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 \n", + "\n", + "## More troubleshooting\n", + "\n", + "Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", + "\n", + "## For foundational technical knowledge (eg Git, APIs, debugging) \n", + "\n", + "If you're relatively new to programming -- I've got your back! While it's ideal to have some programming experience for this course, there's only one mandatory prerequisite: plenty of patience. 😁 I've put together a set of self-study guides that cover Git and GitHub, APIs and endpoints, beginner python and more.\n", + "\n", + "This covers Git and GitHub; what they are, the difference, and how to use them: \n", + "https://github.com/ed-donner/agents/blob/main/guides/03_git_and_github.ipynb\n", + "\n", + "This covers technical foundations: \n", + "ChatGPT vs API; taking screenshots; Environment Variables; Networking basics; APIs and endpoints: \n", + "https://github.com/ed-donner/agents/blob/main/guides/04_technical_foundations.ipynb\n", + "\n", + "This covers Python for beginners, and making sure that a `NameError` never trips you up: \n", + "https://github.com/ed-donner/agents/blob/main/guides/06_python_foundations.ipynb\n", + "\n", + "This covers the essential techniques for figuring out errors: \n", + "https://github.com/ed-donner/agents/blob/main/guides/08_debugging.ipynb\n", + "\n", + "And you'll find other useful guides in the same folder in GitHub. Some information applies to my other Udemy course (eg Async Python) but most of it is very relevant for LLM engineering.\n", + "\n", + "## If this is old hat!\n", + "\n", + "If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress. Ultimately we will fine-tune our own LLM to compete with OpenAI!\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Please read - important note

\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

This code is a live resource - keep an eye out for my emails

\n", + " I push updates to the code regularly. As people ask questions, I add more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but I've also added better explanations and new models like DeepSeek. Consider this like an interactive book.

\n", + " I try to send emails regularly with important updates related to the course. You can find this in the 'Announcements' section of Udemy in the left sidebar. You can also choose to receive my emails via your Notification Settings in Udemy. I'm respectful of your inbox and always try to add value with my emails!\n", + "
\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business value of these exercises

\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.chrome.service import Service\n", + "from webdriver_manager.chrome import ChromeDriverManager\n", + "import time\n", + "import random\n", + "from urllib import robotparser\n", + "from urllib.parse import urlparse\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to OpenAI (or Ollama)\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI. \n", + "\n", + "If you'd like to use free Ollama instead, please see the README section \"Free Alternative to Paid APIs\", and if you're not sure how to do this, there's a full solution in the solutions folder (day1_with_ollama.ipynb).\n", + "\n", + "## Troubleshooting if you have problems:\n", + "\n", + "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", + "\n", + "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", + "\n", + "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", + "\n", + "Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) 
then please see the Troubleshooting notebook in this folder for full instructions" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. 
\\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```python\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "```\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with system and user messages:\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. 
You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business applications

\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Before you continue - now try yourself

\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 1: Create your prompts\n", + "\n", + "system_prompt = \"\"\"\n", + "You are an assistant that creates short clear concise and relevant email \n", + "subject lines based on the content of the email\n", + "\"\"\"\n", + "user_prompt = \"\"\"\n", + "Hi team,\n", + "\n", + "Just a quick update on our Q2 progress. We’ve exceeded our sales goals by 15% and customer satisfaction scores are up 10 points from last quarter. Kudos to everyone involved, especially the sales and support teams. Let’s keep this momentum going as we head into Q3.\n", + "\n", + "Best,\n", + "Jeannine\n", + "\"\"\"\n", + "\n", + "# Step 2: Make the messages list\n", + "\n", + "messages = [\n", + " {\"role\":\"system\", \"content\":system_prompt},\n", + " {\"role\":\"user\", \"content\":f\"Email:{user_prompt}/n/nGenerate a concise subject line for this email.\"}\n", + "] # fill this in\n", + "\n", + "# Step 3: Call OpenAI\n", + "\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages,\n", + " temperature=0.5,\n", + " max_tokens=20\n", + ")\n", + "\n", + "# Step 4: print the result\n", + "\n", + "print(\"Suggested subject line: \", response.choices[0].message.content.strip())" + ] + }, + { + "cell_type": "markdown", + "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", + "metadata": {}, + "source": [ + "## An extra exercise for those who enjoy web scraping\n", + "\n", + "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" + ] + }, + { + "cell_type": "markdown", + "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", + "metadata": {}, + "source": [ + "# Sharing your code\n", + "\n", + "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", + "\n", + "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. 
As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", + "\n", + "Here are good instructions courtesy of an AI friend: \n", + "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", + "metadata": {}, + "outputs": [], + "source": [ + "# A modified class to fetch and parse fully rendered pages: with ethically reduced CAPTCHA events\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using Selenium and BeautifulSoup.\n", + " Render JavaScript content and extract text from the page.\n", + " \"\"\"\n", + " self.url = url\n", + "\n", + " if not self._is_allowed_by_robots(url):\n", + " print(f\"Warning: robots.txt does not explicitly allow webscraping of {url}. Proceeding anyway.\")\n", + " self.text, self.title = self._scrape_content()\n", + "\n", + " # Check robots.txt if scraping is allowed\n", + " def _is_allowed_by_robots(self, url, user_agent=\"*\"):\n", + " parsed = urlparse(url)\n", + " robots_url = f\"{parsed.scheme}://{parsed.netloc}/robots.txt\"\n", + " rp = urllib.robotparser.RobotFileParser()\n", + " rp.set_url(robots_url)\n", + " try:\n", + " rp.read()\n", + " return rp.can_fetch(user_agent, url)\n", + " except Exception:\n", + " # If robots.txt is unreachable, assume permissable\n", + " return True\n", + "\n", + " def _scrape_content(self, retries=3, wait_base=5):\n", + " # List of user agents for rotation\n", + " user_agents = [\n", + " \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\",\n", + " \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.1 Safari/605.1.15\",\n", + " \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0\"\n", + " ]\n", + " # Rotate user agents infrequently\n", + " selected_agent = random.choice(user_agents)\n", + " \n", + " # Set up headless Chrome options\n", + " options = Options()\n", + " options.add_argument(\"--headless=new\")\n", + " options.add_argument(\"--disable-gpu\")\n", + " options.add_argument(\"--no-sandbox\")\n", + " options.add_argument(\"--disable-dev-shm-usage\")\n", + " options.add_argument(f\"user-agent={selected_agent}\")\n", + "\n", + " # Try to bypass anti-bot protections with exponential backoff\n", + " for attempt in range(retries):\n", + " try:\n", + " # Start browser\n", + " service = Service(ChromeDriverManager().install())\n", + " driver = webdriver.Chrome(service=service, options=options)\n", + " driver.set_page_load_timeout(30)\n", + " driver.get(self.url)\n", + "\n", + " # Mimick human browsing behavior with random time delay, without overloading the server\n", + " time.sleep(random.uniform(6, 12))\n", + " \n", + " # Get the page source after rendering\n", + " soup = BeautifulSoup(driver.page_source, 'html.parser')\n", + " driver.quit()\n", + "\n", + " for tag in soup([\"script\", \"style\", \"img\", \"input\"]):\n", + " tag.decompose()\n", + " \n", + " title = soup.title.string.strip() if soup.title and soup.title.string else \"No title found\"\n", + " body = soup.body\n", + " text = soup.body.get_text(separator=\"\\n\", strip=True) if body else \"No content found.\"\n", + "\n", + " return text, title\n", + " \n", + " except Exception as e:\n", + " # Exponential backoff to avoid retry spamming 
on failure\n", + " time.sleep(wait_base * (2 ** attempt)) \n", + " continue\n", + "\n", + " raise Exception(\"Failed to retrieve content despite retries.\")\n", + "\n", + "\n", + "rendered_page = Website(\"https://openai.com\")\n", + "print(\"\\nTitle: \", rendered_page.title)\n", + "print(\"\\nText: \", rendered_page.text, \"\\n\")\n", + "#print(\"\\nUser prompt: \", user_prompt_for(rendered_page), \"\\n\")\n", + "#messages_for(rendered_page)\n", + "#summarize(\"https://openai.com\")\n", + "\n", + "display_summary(\"https://openai.com\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "781119a4-844c-4e03-84bd-8b8f2200d86c", + "metadata": {}, + "outputs": [], + "source": [ + "# With Async for multiple page scraping: using Selenium and Jupyter Labs\n", + "import nest_asyncio # Required for Jupyter notebook\n", + "import asyncio\n", + "from concurrent.futures import ThreadPoolExecutor\n", + "\n", + "# Async-safe wrapper for multiple URLs: because Selenium is synchronous\n", + "def scrape_sync(url):\n", + " try:\n", + " page = Website(url)\n", + " return {\n", + " \"url\": url,\n", + " \"title\": page.title,\n", + " \"text\": page.text,\n", + " \"summary\": display_summary(url)\n", + " }\n", + " except Exception as e:\n", + " return {\n", + " \"url\": url,\n", + " \"error\": str(e)\n", + " }\n", + "\n", + "\n", + "# Async runner for multiple URLs\n", + "async def scrape_multiple_async(urls, max_workers=4):\n", + " loop = asyncio.get_running_loop()\n", + " with ThreadPoolExecutor(max_workers=max_workers) as executor:\n", + " futures = [\n", + " loop.run_in_executor(executor, scrape_sync, url)\n", + " for url in urls\n", + " ]\n", + " return await asyncio.gather(*futures)\n", + "\n", + "\n", + "# Example async usage\n", + "if __name__ == \"__main__\":\n", + " urls_to_scrape = [\n", + " \"https://www.investopedia.com/articles/active-trading/111115/why-all-worlds-top-10-companies-are-american.asp\",\n", + " \"https://fortune.com/ranking/global500/\",\n", + " \"http://en.wikipedia.org/wiki/List_of_largest_corporate_profits_and_losses\",\n", + " ]\n", + "\n", + " async def run():\n", + " results = await scrape_multiple_async(urls_to_scrape)\n", + " for res in results:\n", + " print(f\"\\nURL: {res.get('url')}\")\n", + " print(f\"Title: {res.get('title', 'N/A')}\")\n", + " print(f\"Preview:\\n{res.get('text', res.get('error', 'No content'))}\\n\")\n", + "\n", + " # Jupyter notebook already has a running event loop: asyncio.run() cannot be called from a running event loop\n", + " nest_asyncio.apply()\n", + " await run()\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32fa56f2-f78e-421f-b35e-77fb9608d652", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day5_shared-driver-threaded-scraper_jeannine-jordan.ipynb b/week1/community-contributions/day5_shared-driver-threaded-scraper_jeannine-jordan.ipynb new file mode 100644 index 0000000..698145e --- /dev/null +++ b/week1/community-contributions/day5_shared-driver-threaded-scraper_jeannine-jordan.ipynb @@ -0,0 +1,626 @@ +{ + 
"cells": [ + { + "cell_type": "markdown", + "id": "a98030af-fcd1-4d63-a36e-38ba053498fa", + "metadata": {}, + "source": [ + "# A full business solution\n", + "\n", + "## Now we will take our project from Day 1 to the next level\n", + "\n", + "### BUSINESS CHALLENGE:\n", + "\n", + "Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", + "\n", + "We will be provided a company name and their primary website.\n", + "\n", + "See the end of this notebook for examples of real-world business applications.\n", + "\n", + "And remember: I'm always available if you have problems or ideas! Please do reach out." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5b08506-dc8b-4443-9201-5f1848161363", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.chrome.service import Service\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.support.ui import WebDriverWait\n", + "from selenium.webdriver.support import expected_conditions as EC\n", + "from webdriver_manager.chrome import ChromeDriverManager\n", + "from urllib.parse import urlparse, urljoin\n", + "import time\n", + "import random\n", + "import concurrent.futures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize and constants\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? 
Please visit the troubleshooting notebook!\")\n", + " \n", + "MODEL = 'gpt-4o-mini'\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "106dd65e-90af-4ca8-86b6-23a41840645b", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", + "\n", + "\n", + "# A modified class to fetch and parse fully rendered pages\n", + "class NewWebsite:\n", + " shared_driver = None # Class variable to share browser instance\n", + "\n", + " def __init__(self, url, driver=None):\n", + " self.url = url\n", + " self.driver = driver or NewWebsite._get_shared_driver()\n", + " self.text, self.title, self.links = self._scrape_content()\n", + " \n", + " @classmethod\n", + " def _get_shared_driver(cls):\n", + " if cls.shared_driver is None:\n", + " # Set up headless Chrome options\n", + " options = Options()\n", + " options.add_argument(\"--headless=new\")\n", + " options.add_argument(\"--disable-gpu\")\n", + " options.add_argument(\"--no-sandbox\")\n", + " options.add_argument(\"--disable-dev-shm-usage\")\n", + " options.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\")\n", + "\n", + " service = Service(ChromeDriverManager().install())\n", + " cls.shared_driver = webdriver.Chrome(service=service, options=options)\n", + " return cls.shared_driver\n", + "\n", + " def _scrape_content(self):\n", + " try:\n", + " self.driver.get(self.url)\n", + " # Mimick human browsing behavior without overloading the server\n", + " WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.TAG_NAME, \"a\")))\n", + " # Allow JS-rendered content to settle\n", + " time.sleep(2)\n", + "\n", + " # Get the page source after rendering\n", + " soup = BeautifulSoup(self.driver.page_source, \"html.parser\")\n", + " \n", + " for tag in soup([\"script\", \"style\", \"img\", \"input\"]):\n", + " tag.decompose()\n", + " \n", + " title = soup.title.string.strip() if soup.title and soup.title.string else \"No title found\"\n", + " body = soup.body\n", + " text = soup.body.get_text(separator=\"\\n\", strip=True) if body else \"No content found.\"\n", + "\n", + " # Extract and clean links\n", + " links = []\n", + " for link_tag in soup.find_all(\"a\", href=True):\n", + " href = link_tag[\"href\"].strip()\n", + " if href and 
not href.startswith((\"mailto:\", \"tel:\", \"javascript:\")):\n", + " full_url = urljoin(self.url, href)\n", + " links.append(full_url)\n", + " \n", + " return text, title, links\n", + " \n", + " except Exception as e:\n", + " return \"Error loading content\", \"Error\", []\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", + "\n", + " # Close the driver\n", + " @classmethod\n", + " def close_driver(cls):\n", + " if cls.shared_driver:\n", + " cls.shared_driver.quit()\n", + " cls.shared_driver = None\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", + "metadata": {}, + "outputs": [], + "source": [ + "cardiff = NewWebsite(\"https://cardiff.co/\")\n", + "cardiff.links" + ] + }, + { + "cell_type": "markdown", + "id": "1771af9c-717a-4fca-bbbe-8a95893312c3", + "metadata": {}, + "source": [ + "## First step: Have GPT-4o-mini figure out which links are relevant\n", + "\n", + "### Use a call to gpt-4o-mini to read the links on a webpage, and respond in structured JSON. \n", + "It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". \n", + "We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n", + "\n", + "This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n", + "\n", + "Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6957b079-0d96-45f7-a26a-3487510e9b35", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b97e4068-97ed-4120-beae-c42105e4d59a", + "metadata": {}, + "outputs": [], + "source": [ + "print(link_system_prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
\\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6bcbfa78-6395-4685-b92c-22d592050fd7", + "metadata": {}, + "outputs": [], + "source": [ + "print(get_links_user_prompt(cardiff))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", + "metadata": {}, + "outputs": [], + "source": [ + "# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", + "\n", + "huggingface = Website(\"https://huggingface.co\")\n", + "huggingface.links" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", + "metadata": {}, + "outputs": [], + "source": [ + "get_links(\"https://cardiff.co\")" + ] + }, + { + "cell_type": "markdown", + "id": "0d74128e-dfb6-47ec-9549-288b621c838c", + "metadata": {}, + "source": [ + "## Second step: make the brochure!\n", + "\n", + "Assemble all the details into another prompt to GPT4-o" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", + "metadata": {}, + "outputs": [], + "source": [ + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result\n", + "\n", + "def get_all_details_rendered(url):\n", + " result = \"Landing page:\\n\"\n", + " result += NewWebsite(url).get_contents()\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + "\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += NewWebsite(link[\"url\"]).get_contents()\n", + "\n", + " # Important: close browser after all scraping is done\n", + " NewWebsite.close_driver()\n", + " return result\n", + "\n", + "def scrape_link(link):\n", + " try:\n", + " page = NewWebsite(link[\"url\"])\n", + " return f\"\\n\\n{link['type']}\\n{page.get_contents()}\"\n", + " except Exception as e:\n", + " return f\"\\n\\n{link['type']}\\nError loading page: {e}\"\n", + "\n", + "# Threaded scraper for linked pages\n", + "def get_all_details_rendered_concurrently(url):\n", + " result = \"Landing page:\\n\"\n", + " result += NewWebsite(url).get_contents()\n", + "\n", + " # LLM-filtered link generator\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + "\n", + " with concurrent.futures.ThreadPoolExecutor() as executor:\n", + " future_to_link = {executor.submit(scrape_link, link): link for link in links[\"links\"]}\n", + " for future in 
concurrent.futures.as_completed(future_to_link):\n", + " result += future.result()\n", + "\n", + " # Close shared browser\n", + " NewWebsite.close_driver()\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", + "metadata": {}, + "outputs": [], + "source": [ + "print(get_all_details_rendered_concurrently(\"https://cardiff.co\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", + "Include details of company culture, customers and careers/jobs if you have the information.\"\n", + "\n", + "# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", + "\n", + "# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", + "# Include details of company culture, customers and careers/jobs if you have the information.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " #user_prompt += get_all_details(url)\n", + " user_prompt += get_all_details_rendered_concurrently(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd909e0b-1312-4ce2-a553-821e795d7572", + "metadata": {}, + "outputs": [], + "source": [ + "get_brochure_user_prompt(\"Cardiff\", \"https://cardiff.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", + "metadata": {}, + "outputs": [], + "source": [ + "def create_brochure(company_name, url):\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " )\n", + " result = response.choices[0].message.content\n", + " display(Markdown(result))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e093444a-9407-42ae-924a-145730591a39", + "metadata": {}, + "outputs": [], + "source": [ + "create_brochure(\"Cardiff\", \"https://cardiff.co\")" + ] + }, + { + "cell_type": "markdown", + "id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18", + "metadata": {}, + "source": [ + "## Finally - a minor improvement\n", + "\n", + "With a small adjustment, we can change this so that the results stream back from OpenAI,\n", + "with the familiar typewriter animation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"51db0e49-f261-4137-aabe-92dd601f7725", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_brochure(company_name, url):\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", + "metadata": {}, + "outputs": [], + "source": [ + "stream_brochure(\"Cardiff\", \"https://cardiff.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fdb3f8d8-a3eb-41c8-b1aa-9f60686a653b", + "metadata": {}, + "outputs": [], + "source": [ + "# Try changing the system prompt to the humorous version when you make the Brochure for Hugging Face:\n", + "\n", + "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "markdown", + "id": "a27bf9e0-665f-4645-b66b-9725e2a959b5", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business applications

\n", + " In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", + "\n", + "This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", + "\n", + "Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype. See what other students have done in the community-contributions folder -- so many valuable projects -- it's wild!\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "14b2454b-8ef8-4b5c-b928-053a15e0d553", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Before you move to Week 2 (which is tons of fun)

\n", + " Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "17b64f0f-7d33-4493-985a-033d06e8db08", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

A reminder on 3 useful resources

\n", + " 1. The resources for the course are available here.
\n", + " 2. I'm on LinkedIn here and I love connecting with people taking the course!
\n", + " 3. I'm trying out X/Twitter and I'm at @edwarddonner and hoping people will teach me how it's done.. \n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "6f48e42e-fa7a-495f-a5d4-26bfc24d60b6", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Finally! I have a special request for you

\n", + " \n", + " My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8d3e1a1-ba54-4907-97c5-30f89a24775b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/week1-EXERCISE_rewrite-internal-tools-code-and-UI-image_jeannine-jordan.ipynb b/week1/community-contributions/week1-EXERCISE_rewrite-internal-tools-code-and-UI-image_jeannine-jordan.ipynb new file mode 100644 index 0000000..6449246 --- /dev/null +++ b/week1/community-contributions/week1-EXERCISE_rewrite-internal-tools-code-and-UI-image_jeannine-jordan.ipynb @@ -0,0 +1,349 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display, Image\n", + "from openai import OpenAI\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.chrome.service import Service\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.support.ui import WebDriverWait\n", + "from selenium.webdriver.support import expected_conditions as EC\n", + "from webdriver_manager.chrome import ChromeDriverManager\n", + "from urllib.parse import urlparse, urljoin\n", + "import time\n", + "import random\n", + "import concurrent.futures\n", + "import re" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL = 'gpt-4o-mini'\n", + "openai = OpenAI()\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment\n", + "\n", + "# A modified class to fetch and parse fully rendered pages\n", + "class NewWebsite:\n", + " shared_driver = None # Class variable to share browser instance\n", + "\n", + " def __init__(self, url, driver=None):\n", + " self.url = url\n", + " self.driver = driver or NewWebsite._get_shared_driver()\n", + " self.text, self.title, self.links = self._scrape_content()\n", + " \n", + " @classmethod\n", + " def _get_shared_driver(cls):\n", + " if cls.shared_driver is None:\n", + " # Set up headless Chrome options\n", + " options = Options()\n", + " options.add_argument(\"--headless=new\")\n", + " 
options.add_argument(\"--disable-gpu\")\n", + " options.add_argument(\"--no-sandbox\")\n", + " options.add_argument(\"--disable-dev-shm-usage\")\n", + " options.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\")\n", + "\n", + " service = Service(ChromeDriverManager().install())\n", + " cls.shared_driver = webdriver.Chrome(service=service, options=options)\n", + " return cls.shared_driver\n", + "\n", + " def _scrape_content(self):\n", + " try:\n", + " self.driver.get(self.url)\n", + " # Mimick human browsing behavior without overloading the server\n", + " WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.TAG_NAME, \"a\")))\n", + " # Allow JS-rendered content to settle\n", + " time.sleep(2)\n", + "\n", + " # Get the page source after rendering\n", + " soup = BeautifulSoup(self.driver.page_source, \"html.parser\")\n", + " \n", + " for tag in soup([\"script\", \"style\", \"img\", \"input\"]):\n", + " tag.decompose()\n", + " \n", + " title = soup.title.string.strip() if soup.title and soup.title.string else \"No title found\"\n", + " body = soup.body\n", + " text = soup.body.get_text(separator=\"\\n\", strip=True) if body else \"No content found.\"\n", + "\n", + " # Extract and clean links\n", + " links = []\n", + " for link_tag in soup.find_all(\"a\", href=True):\n", + " href = link_tag[\"href\"].strip()\n", + " if href and not href.startswith((\"mailto:\", \"tel:\", \"javascript:\")):\n", + " full_url = urljoin(self.url, href)\n", + " links.append(full_url)\n", + " \n", + " return text, title, links\n", + " \n", + " except Exception as e:\n", + " return \"Error loading content\", \"Error\", []\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", + "\n", + " # Close the driver\n", + " @classmethod\n", + " def close_driver(cls):\n", + " if cls.shared_driver:\n", + " cls.shared_driver.quit()\n", + " cls.shared_driver = None\n", + "\n", + "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"\n", + "\n", + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
\\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt\n", + "\n", + "def get_links(url):\n", + " website = NewWebsite(url)\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)\n", + "\n", + "def scrape_link(link):\n", + " try:\n", + " page = NewWebsite(link[\"url\"])\n", + " return f\"\\n\\n{link['type']}\\n{page.get_contents()}\"\n", + " except Exception as e:\n", + " return f\"\\n\\n{link['type']}\\nError loading page: {e}\"\n", + "\n", + "# Threaded scraper for linked pages\n", + "def get_all_details_rendered_concurrently(url):\n", + " result = \"Landing page:\\n\"\n", + " result += NewWebsite(url).get_contents()\n", + "\n", + " # LLM-filtered link generator\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + "\n", + " with concurrent.futures.ThreadPoolExecutor() as executor:\n", + " future_to_link = {executor.submit(scrape_link, link): link for link in links[\"links\"]}\n", + " for future in concurrent.futures.as_completed(future_to_link):\n", + " result += future.result()\n", + "\n", + " # Close shared browser\n", + " NewWebsite.close_driver()\n", + " return result\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [], + "source": [ + "# here is the question; type over this to ask something new\n", + "\n", + "system_prompt = \"You are an LLM Engineer that analyzes the contents of several relevant pages from a company website \\\n", + "rewrites internal tools and systems and rebuilds them end-to-end, starting from scratch. Starting with the online application at cardiff.co/apply, \\\n", + "Tell me why you're best suited to be the lead of this project and work with our 12 year resident developer to implement a \\\n", + "state of the art solution in record time. Include backend architecture, model orchestration, how you handle latency, cost and user experience, \\\n", + "and details of how you would achieve this goal based on company culture and industries served if you have the information, \\\n", + "and walk me through the details like you're explaining it to a sharp product owner. 
Respond in markdown.\"\\\n", + "\n", + "\n", + "def get_solution_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a solution to rewrite the company's application in markdown.\\n\"\n", + " #user_prompt += get_all_details(url)\n", + " user_prompt += get_all_details_rendered_concurrently(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt\n", + "\n", + "def create_solution(company_name, url):\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_solution_user_prompt(company_name, url)}\n", + " ],\n", + " )\n", + " result = response.choices[0].message.content\n", + " display(Markdown(result))\n", + "\n", + " return result\n", + "\n", + "#create_solution(\"Cardiff\", \"https://cardiff.co\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [], + "source": [ + "# Get gpt-4o-mini to answer, with streaming\n", + "\n", + "new_system_prompt = \"You are a Senior Engineer that analyzes the planned solution given to you for a company website \\\n", + "and you rewrite code for rebuilding internal tools and systems end-to-end based on the proposed solutions. \\\n", + "Start with the online application at cardiff.co/apply, use canvas and write code for the proposed solution \\\n", + "in the appropriate language that best suits the task for backend architecture, model orchestration, how you handle latency, cost and user experience wherever possible.\"\n", + "\n", + "output_dir = \"cardiff_rebuild_output\"\n", + "os.makedirs(output_dir, exist_ok=True)\n", + "\n", + "def save_code_blocks(markdown_text, base_filename=\"cardiff_code\"):\n", + " output_dir = \"cardiff_rebuild_output\"\n", + " os.makedirs(output_dir, exist_ok=True)\n", + " \n", + " code_blocks = re.findall(r\"```(.*?)\\n(.*?)```\", markdown_text, re.DOTALL)\n", + " saved_files = []\n", + "\n", + " for idx, (language, code) in enumerate(code_blocks, 1):\n", + " ext = language.strip() if language else \"txt\"\n", + " filename = f\"{base_filename}_part{idx}.{ext}\"\n", + " filepath = os.path.join(output_dir, filename)\n", + " with open(filepath, \"w\", encoding=\"utf-8\") as f:\n", + " f.write(code)\n", + " saved_files.append(filepath)\n", + "\n", + " return saved_files\n", + "\n", + "def develop_from_proposal(proposal_text, company_name):\n", + " # Stream code generation from GPT-4o\n", + " system = \"You are a senior software engineer. Use the following proposal to generate production-ready code to \\\n", + " implement the backend, frontend, and any orchestration described. 
Write clean, documented code in markdown format.\"\n", + " \n", + " stream = openai.chat.completions.create(\n", + " model=\"gpt-4o\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system},\n", + " {\"role\": \"user\", \"content\": proposal_text}\n", + " ],\n", + " stream=True\n", + " )\n", + "\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " content = chunk.choices[0].delta.content or \"\"\n", + " response += content\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + "\n", + " saved_files = save_code_blocks(response)\n", + " \n", + " # Generate a UI design mockup image\n", + " image_prompt = f\"A modern, mobile-friendly UI wireframe for a business loan application system for {company_name}. Clean layout, input fields for business name, revenue, loan amount, industry, and contact info. Includes a step-by-step progress bar, submit button, and secure branding.\"\n", + " \n", + " img_response = openai.images.generate(\n", + " model=\"dall-e-3\",\n", + " prompt=image_prompt,\n", + " n=1,\n", + " size=\"1024x1024\"\n", + " )\n", + " \n", + " image_url = img_response.data[0].url\n", + " img_path = os.path.join(output_dir, f\"{company_name.lower()}_ui_mockup.png\")\n", + " with open(img_path, 'wb') as handler:\n", + " handler.write(requests.get(image_url).content)\n", + "\n", + " print(\"Code files saved to:\", saved_files)\n", + " print(\"UI mockup saved at:\", img_path)\n", + "\n", + " display(Markdown(\"### Proposed UI Design\"))\n", + " display(Image(url=image_url))\n", + "\n", + "proposal = create_solution(\"Cardiff\", \"https://cardiff.co\")\n", + "develop_from_proposal(proposal, \"Cardiff\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [], + "source": [ + "# Get Llama 3.2 to answer" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 0010efa94631ca1ed3f918aec4d2f2b29c8b5752 Mon Sep 17 00:00:00 2001 From: "Marcus.Rosen" Date: Mon, 2 Jun 2025 16:33:02 +1000 Subject: [PATCH 14/23] Added LiteLLM exampel for Week2 Day2 --- .../Week2_Day2_Litellm.ipynb | 420 ++++++++++++++++++ 1 file changed, 420 insertions(+) create mode 100644 week2/community-contributions/Week2_Day2_Litellm.ipynb diff --git a/week2/community-contributions/Week2_Day2_Litellm.ipynb b/week2/community-contributions/Week2_Day2_Litellm.ipynb new file mode 100644 index 0000000..af49175 --- /dev/null +++ b/week2/community-contributions/Week2_Day2_Litellm.ipynb @@ -0,0 +1,420 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6a08763a-aed6-4f91-94d0-80a3c0e2665b", + "metadata": {}, + "source": [ + "### Weeks 2 - Day 2 - Gradio Chatbot with LiteLLM (Model Routing)" + ] + }, + { + "cell_type": "markdown", + "id": "a4f38c58-5ceb-4d5e-b538-c1acdc881f73", + "metadata": {}, + "source": [ + "**Author** : [Marcus Rosen](https://github.com/MarcusRosen)" + ] + }, + { + "cell_type": "markdown", + "id": "36f4814a-2bfc-4631-97d7-7a474fa1cc8e", + "metadata": {}, + "source": [ + "[LiteLLM](https://docs.litellm.ai/docs/) provides 
the abilitty to call different LLM providers via a unified interface, returning results in OpenAI compatible formats.\n", + "\n", + "Features:\n", + "- Model Selection in Gradio (Anthropic, OpenAI, Gemini)\n", + "- Single Inference function for all model providers via LiteLLM (call_llm)\n", + "- Streaming **NOTE:** Bug when trying to stream in Gradio, but works directly in Notebook\n", + "- Debug Tracing" + ] + }, + { + "cell_type": "code", + "execution_count": 109, + "id": "b6c12598-4773-4f85-93ca-0128d74fbca0", + "metadata": {}, + "outputs": [], + "source": [ + "from litellm import completion\n", + "import gradio as gr\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "import os\n", + "import requests\n", + "import json" + ] + }, + { + "cell_type": "markdown", + "id": "d24be370-5347-47fb-a58e-21a1b5409ab2", + "metadata": {}, + "source": [ + "#### Load API Keys" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "e03afbe9-16aa-434c-a701-b3bfe75e927d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AIzaSyDC\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GEMINI_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + " # import google.generativeai\n", + " # google.generativeai.configure()\n", + "else:\n", + " print(\"Gemini API Key not set\")" + ] + }, + { + "cell_type": "markdown", + "id": "66e46447-0e73-49ef-944a-d1e8fae4986e", + "metadata": {}, + "source": [ + "### Use LiteLLM to abstract out the model provider" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "473c2029-ca74-4f1e-92ac-05f7817ff7df", + "metadata": {}, + "outputs": [], + "source": [ + "def call_llm(model, system_prompt, user_prompt, json_format_response=False, streaming=False):\n", + " if DEBUG_OUTPUT: \n", + " print(\"call_llm()\")\n", + " print(f\"streaming={streaming}\")\n", + " print(f\"json_format_response={json_format_response}\")\n", + " \n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + " payload = {\n", + " \"model\": model,\n", + " \"messages\": messages\n", + " }\n", + " # Use Json Reponse Format\n", + " # Link: https://docs.litellm.ai/docs/completion/json_mode\n", + " if json_format_response:\n", + " payload[\"response_format\"]: { \"type\": \"json_object\" }\n", + " \n", + " if streaming:\n", + " payload[\"stream\"] = True\n", + " response = completion(**payload)\n", + " # Return a generator expression instead of using yield in the function\n", + " return (part.choices[0].delta.content or \"\" for part in response)\n", + " else:\n", + " response = completion(**payload)\n", + " 
return response[\"choices\"][0][\"message\"][\"content\"]" + ] + }, + { + "cell_type": "markdown", + "id": "f45e0972-a6a0-4237-8a69-e6f165f30e0d", + "metadata": {}, + "source": [ + "### Brochure building functions" + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "id": "c76d4ff9-0f18-49d0-a9b5-2c6c0bad359a", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "id": "ff41b687-3a46-4bca-a031-1148b91a4fdf", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url, model):\n", + " if DEBUG_OUTPUT:\n", + " print(\"get_links()\")\n", + " website = Website(url)\n", + "\n", + " link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + " You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + " such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + " link_system_prompt += \"You should respond in raw JSON exactly as specified in this example. DO NOT USE MARKDOWN.\"\n", + " link_system_prompt += \"\"\"\n", + " {\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + " }\n", + " \"\"\"\n", + " \n", + " result = call_llm(model=model, \n", + " system_prompt=link_system_prompt, \n", + " user_prompt=get_links_user_prompt(website), \n", + " json_format_response=True, \n", + " streaming=False)\n", + " if DEBUG_OUTPUT:\n", + " print(result)\n", + " return json.loads(result)\n", + "\n", + "def get_links_user_prompt(website):\n", + " if DEBUG_OUTPUT:\n", + " print(\"get_links_user_prompt()\")\n", + " \n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
\\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + "\n", + " if DEBUG_OUTPUT:\n", + " print(user_prompt)\n", + " \n", + " return user_prompt\n", + "\n", + "def get_all_details(url, model):\n", + " if DEBUG_OUTPUT:\n", + " print(\"get_all_details()\")\n", + " \n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = get_links(url, model)\n", + " if DEBUG_OUTPUT:\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result\n", + "\n", + "def get_brochure_user_prompt(company_name, url, model):\n", + " \n", + " if DEBUG_OUTPUT:\n", + " print(\"get_brochure_user_prompt()\")\n", + " \n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += get_all_details(url, model)\n", + " user_prompt = user_prompt[:5000] # Truncate if more than 5,000 characters\n", + " return user_prompt\n" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "id": "cf7512a1-a498-44e8-a234-9affb72efe60", + "metadata": {}, + "outputs": [], + "source": [ + "def create_brochure(company_name, url, model, streaming):\n", + "\n", + " system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", + "Include details of company culture, customers and careers/jobs if you have the information.\"\n", + " if streaming:\n", + " result = call_llm(model=model, system_prompt=system_prompt, user_prompt=get_brochure_user_prompt(company_name, url, model), streaming=True)\n", + " return (p for p in result)\n", + " else: \n", + " return call_llm(model=model, system_prompt=system_prompt, user_prompt=get_brochure_user_prompt(company_name, url, model), streaming=False)\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "ecb6d212-ddb6-4170-81bf-8f3ea54479f8", + "metadata": {}, + "source": [ + "#### Testing Model before implenting Gradio" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "id": "de89843a-08ac-4431-8c83-21a93c05f764", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Rio Tinto: Providing the Materials for a Sustainable Future\n", + "\n", + "## About Rio Tinto\n", + "\n", + "Rio Tinto is a global mining and metals company, operating in 35 countries with over 60,000 employees. Their purpose is to find better ways to provide the materials the world needs. 
Continuous improvement and innovation are at the core of their DNA, as they work to responsibly supply the metals and minerals critical for urbanization and the transition to a low-carbon economy.\n", + "\n", + "## Our Products\n", + "\n", + "Rio Tinto's diverse portfolio includes:\n", + "\n", + "- Iron Ore: The primary raw material used to make steel, which is strong, long-lasting and cost-efficient.\n", + "- Aluminium: A lightweight, durable and recyclable metal.\n", + "- Copper: A tough, malleable, corrosion-resistant and recyclable metal that is an excellent conductor of heat and electricity.\n", + "- Lithium: The lightest of all metals, a key element for low-carbon technologies.\n", + "- Diamonds: Ethically-sourced, high-quality diamonds.\n", + "\n", + "## Sustainability and Innovation\n", + "\n", + "Sustainability is at the heart of Rio Tinto's operations. They are targeting net zero emissions by 2050 and investing in nature-based solutions to complement their decarbonization efforts. Innovation is a key focus, with research and development into new technologies to improve efficiency and reduce environmental impact.\n", + "\n", + "## Careers and Culture\n", + "\n", + "Rio Tinto values its 60,000 employees and is committed to fostering a diverse and inclusive workplace. They offer a wide range of career opportunities, from mining and processing to engineering, finance, and more. Rio Tinto's culture is centered on safety, collaboration, and continuous improvement, with a strong emphasis on sustainability and responsible business practices.\n", + "\n", + "## Conclusion\n", + "\n", + "Rio Tinto is a global leader in the mining and metals industry, providing the materials essential for a sustainable future. Through their commitment to innovation, sustainability, and their talented workforce, Rio Tinto is well-positioned to meet the world's growing demand for critical resources.\n", + "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n", + "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n", + "\n", + ". 
at 0x7f80ca5da0c0>\n" + ] + } + ], + "source": [ + "MODEL=\"claude-3-haiku-20240307\"\n", + "DEBUG_OUTPUT=False\n", + "streaming=True\n", + "result = create_brochure(company_name=\"Rio Tinto\", url=\"http://www.riotinto.com\", model=MODEL, streaming=streaming)\n", + "\n", + "if streaming:\n", + " for chunk in result:\n", + " print(chunk, end=\"\", flush=True)\n", + "else:\n", + " print(result)\n" + ] + }, + { + "cell_type": "markdown", + "id": "1f330c92-6280-4dae-b4d8-717a56edb236", + "metadata": {}, + "source": [ + "#### Gradio Setup\n", + "Associate Dropdown values with the model we want to use.\n", + "Link: https://www.gradio.app/docs/gradio/dropdown#initialization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d2f38862-3728-4bba-9e16-6f9fab276145", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "DEBUG_OUTPUT=True\n", + "view = gr.Interface(\n", + " fn=create_brochure,\n", + " inputs=[\n", + " gr.Textbox(label=\"Company name:\"),\n", + " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", + " gr.Dropdown(choices=[(\"GPT 4o Mini\", \"gpt-4o-mini\"), \n", + " (\"Claude Haiku 3\", \"claude-3-haiku-20240307\"), \n", + " (\"Gemini 2.0 Flash\", \"gemini/gemini-2.0-flash\")], \n", + " label=\"Select model\"),\n", + " gr.Checkbox(label=\"Stream\")\n", + " ],\n", + " outputs=[gr.Markdown(label=\"Brochure:\")],\n", + " flagging_mode=\"never\"\n", + ")\n", + "view.launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0981136-2067-43b8-b17d-83560dd609ce", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 7bd46ba43b75f5a7958a291aa9d8f036d463ea1d Mon Sep 17 00:00:00 2001 From: habibmir808 Date: Mon, 2 Jun 2025 19:40:49 +0600 Subject: [PATCH 15/23] user can summarize research papers by website link --- .../day1_summarize_research_papers.ipynb | 307 ++++++++++++++++++ 1 file changed, 307 insertions(+) create mode 100644 week1/community-contributions/day1_summarize_research_papers.ipynb diff --git a/week1/community-contributions/day1_summarize_research_papers.ipynb b/week1/community-contributions/day1_summarize_research_papers.ipynb new file mode 100644 index 0000000..246da69 --- /dev/null +++ b/week1/community-contributions/day1_summarize_research_papers.ipynb @@ -0,0 +1,307 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "3ba06289-d17a-4ccd-85f5-2b79956d4e59", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install selenium" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb6636be-e43f-4896-aadd-cafda003ed4e", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q -U google-genai" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dfe66209-1d33-4292-80f1-20e11baf4bc3", + "metadata": {}, + "outputs": [], + "source": [ + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.chrome.service import Service\n", + "from bs4 import BeautifulSoup\n", + "import time\n", + "import os\n", + "from dotenv import 
load_dotenv\n", + "from IPython.display import Markdown, display\n", + "from google import genai\n", + "from google.genai import types\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2b4306c-17d0-46fe-a889-7440ff809dc6", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "#load env\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('GEMINI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "markdown", + "id": "08ec6fec-886c-4a0c-a046-e8643ad700d3", + "metadata": {}, + "source": [ + "# Lets make a simple call for check our model is working fine or not" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89143d5c-0013-4f7e-8e1f-f7db7e936f0d", + "metadata": {}, + "outputs": [], + "source": [ + "client = genai.Client(api_key=api_key)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1144b77a-6785-479a-ab4f-bb0ab5624b49", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "response = client.models.generate_content(\n", + " model=\"gemini-2.5-flash-preview-05-20\",\n", + " contents=[\"hi gemini\"]\n", + ")\n", + "print(response.text)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bbf3836c-19b8-44e1-904a-f265925c2786", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "class Website:\n", + " def __init__(self, url, driver_path=None, wait_time=3):\n", + " self.url = url\n", + " self.wait_time = wait_time\n", + "\n", + " # Headless Chrome settings\n", + " options = Options()\n", + " # options.add_argument(\"--headless\") \n", + " # Headless mode runs the browser in the background (invisible).\n", + " # However, some websites (like openai.com) block headless browsers.\n", + " # So if this line is active, the page may not load correctly and you may not get the full content.\n", + " options.add_argument(\"--disable-gpu\")\n", + " options.add_argument(\"--no-sandbox\")\n", + " options.add_argument(\"--window-size=1920x1080\")\n", + "\n", + " # Driver path\n", + " if driver_path:\n", + " service = Service(executable_path=driver_path)\n", + " else:\n", + " service = Service() \n", + "\n", + " # Start browser\n", + " driver = webdriver.Chrome(service=service, options=options)\n", + " driver.get(url)\n", + "\n", + " # Wait for the loading page\n", + " time.sleep(self.wait_time)\n", + "\n", + " # Take page source\n", + " html = driver.page_source\n", + " driver.quit()\n", + "\n", + " # Analysis with BeautifulSoup \n", + " soup = BeautifulSoup(html, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + "\n", + " # Clean irrelevant tags\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + "\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "852c52e2-bd4d-4bb9-94ef-e498c33f1a89", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"You are an academic research assistant specialized in summarizing 
scholarly papers. Follow this workflow rigorously:\n", + "\n", + "Step 1: Document Verification\n", + "Verify if the input is a research paper by checking for:\n", + "\n", + "Presence of academic sections (Abstract, Introduction, Methodology, Results, Discussion, References)\n", + "\n", + "Technical/scholarly language\n", + "\n", + "Citations (in-text or bibliography)\n", + "\n", + "Research claims or data analysis\n", + "If NOT a research paper:\n", + "→ Respond: \"This doesn't appear to be a research paper. Please upload peer-reviewed academic literature for summarization.\"\n", + "\n", + "Step 2: Structured Summary (If verified)\n", + "Generate a 5-section summary in this exact format:\n", + "\n", + "1. Research Question\n", + "[Identify core problem/gap addressed in 1 sentence]\n", + "\n", + "2. Methodology\n", + "[Study design, data sources, analytical techniques in 2 bullet points]\n", + "\n", + "3. Key Findings\n", + "[3-4 quantified results with numerical evidence from tables/figures]\n", + "\n", + "4. Limitations\n", + "[2 major constraints acknowledged by authors]\n", + "\n", + "5. Significance\n", + "[Impact on field & practical implications in 1 sentence]\n", + "\n", + "Critical Rules:\n", + "Accuracy Priority: Never invent data. Write \"Not specified\" for missing elements\n", + "\n", + "Source Anchoring: Cite page/paragraph numbers for claims (e.g., \"Fig 3 shows 24% improvement\")\n", + "\n", + "Jargon Handling: Simplify complex terms using: [Technical Term → Layman Explanation] inline\n", + "\n", + "Bias Alert: Flag any undeclared funding/sponsorship conflicts\n", + "\n", + "Output Format: Strict Markdown with section headers, 200-word maximum\n", + "\n", + "Example Output:\n", + "1. Research Question\n", + "How does microplastic concentration affect zebrafish neural development?\n", + "\n", + "2. Methodology\n", + "\n", + "Exposed embryos to 0.1-10μm PET particles (5-100mg/L) for 96h\n", + "\n", + "Quantified gene expression (RT-qPCR) and behavioral assays (Open Field Test)\n", + "\n", + "3. Key Findings\n", + "▲ 40% reduction in neuron count at 50mg/L exposure (p<0.01, Fig 2B)\n", + "■ 2.3x increase in anxiolytic behavior (Table 3)\n", + "▼ 17% downregulation in shha expression (p=0.03)\n", + "\n", + "4. Limitations\n", + " \n", + "Used static exposure vs dynamic aquatic environments\n", + "\n", + "Limited proteomic validation\n", + "\n", + "5. 
Significance\n", + "Establishes dose-dependent neurotoxicity thresholds for aquatic toxicology regulations.\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7620c685-c35c-4d6b-aaf1-a3da98f19ca7", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a summary of this website in markdown.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a4257406-089b-45a3-bfb5-272004360a49", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize(url):\n", + " website = Website(url)\n", + " response = client.models.generate_content(\n", + " model=\"gemini-2.5-flash-preview-05-20\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=system_prompt),\n", + " contents=user_prompt_for(website)\n", + " )\n", + "\n", + " return response.text\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f68b32ae-9e65-4aa4-ae8d-cc2482c4a2e2", + "metadata": {}, + "outputs": [], + "source": [ + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae52543c-01c1-4262-b53c-95ef4e5a93aa", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://onlinelibrary.wiley.com/doi/full/10.1155/2021/8812542\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 65db703e2db3e2e9a1981ec9877519ad8f6fad95 Mon Sep 17 00:00:00 2001 From: Ekta Shukla Date: Mon, 2 Jun 2025 23:43:20 +0530 Subject: [PATCH 16/23] Add LLM product comparison using Selenium, OpenAI, and Ollama --- ...ay1_product_comparison_openai_ollama.ipynb | 226 ++++++++++++++++++ 1 file changed, 226 insertions(+) create mode 100644 community-contributions/day1_product_comparison_openai_ollama.ipynb diff --git a/community-contributions/day1_product_comparison_openai_ollama.ipynb b/community-contributions/day1_product_comparison_openai_ollama.ipynb new file mode 100644 index 0000000..6c76c1f --- /dev/null +++ b/community-contributions/day1_product_comparison_openai_ollama.ipynb @@ -0,0 +1,226 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "38795b24-9801-4cfb-a000-ccd7f41e6128", + "metadata": {}, + "source": [ + "\n", + "# 🧠 Multi-Product Competitor Intelligence Summarizer using Web Scraping + LLM\n", + "\n", + "This notebook scrapes product pages using `Selenium`, collects the product information, and summarizes key features and comparison insights using `Ollama (LLaMA3) and OpenAI`.\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from openai import 
OpenAI\n", + "\n", + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"Summarize the following product information for comparison.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38245e18", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# 📦 Install required packages (run once)\n", + "!pip install selenium bs4 requests\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "88ae528b-aefe-4c64-b927-676e739194af", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d4a831a5", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize_with_openai(text, model=\"gpt-4o-mini\"):\n", + " response = openai.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": text}\n", + " ],\n", + " temperature=0.7\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef65cd72", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# ⚙️ Selenium setup (headless)\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.common.by import By\n", + "import time\n", + "\n", + "def scrape_text_from_url(url):\n", + " options = Options()\n", + " options.add_argument(\"--headless=new\")\n", + " driver = webdriver.Chrome(options=options)\n", + " driver.get(url)\n", + " time.sleep(3)\n", + " \n", + " # You can tune this selector depending on the site\n", + " body = driver.find_element(By.TAG_NAME, 'body')\n", + " text = body.text\n", + " driver.quit()\n", + " return text.strip()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36e19014", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# 🧠 LLM Prompting using Ollama (local llama3)\n", + "import subprocess\n", + "\n", + "def summarize_with_ollama(text):\n", + " prompt = f\"Summarize the following product description:\\n\\n{text}\\n\\nSummary:\"\n", + " try:\n", + " print(\"inside ollama\")\n", + " result = subprocess.run(\n", + " [\"ollama\", \"run\", \"llama3.2\"],\n", + " input=prompt,\n", + " capture_output=True, text=True, check=True, encoding=\"utf-8\"\n", + " )\n", + " print(\"git result\")\n", + " return result.stdout.strip()\n", + 
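"    # If `ollama run` exits with a non-zero status, CalledProcessError carries the CLI's stderr, which usually explains the failure.\n",
+ 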
" except subprocess.CalledProcessError as e:\n", + " return f\"Error running ollama: {e.stderr}\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e04cea6e", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# 🔁 Analyze multiple product URLs and summarize\n", + "product_urls = {\n", + " \"iPhone 15 Pro\": \"https://www.apple.com/in/iphone-15-pro/\",\n", + " \"Samsung S24 Ultra\": \"https://www.samsung.com/in/smartphones/galaxy-s24-ultra/\",\n", + "}\n", + "\n", + "product_texts = {}\n", + "\n", + "for name, url in product_urls.items():\n", + " print(f\"Scraping {name} ...\")\n", + " product_texts[name] = scrape_text_from_url(url)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5ebd5a20", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# 📄 Display side-by-side summaries\n", + "for name, text in product_texts.items():\n", + " print(f\"\\n🔹 {name} Summary with Ollama:\")\n", + " print(summarize_with_ollama(text))\n", + "\n", + " print(f\"\\n🔹 {name} Summary with OpenAI GPT:\")\n", + " print(summarize_with_openai(text))\n", + " print(\"=\"*100)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "935e0081-ccf5-4d9a-a984-ee82c77c04a2", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 41ae0bdc24ecea4ed4bdfb5d91ccd22ffc467336 Mon Sep 17 00:00:00 2001 From: Adriana394 <158718290+Adriana394@users.noreply.github.com> Date: Tue, 3 Jun 2025 15:16:50 +0200 Subject: [PATCH 17/23] create community folder --- .../testing_fine_tuned_model_with_rag.py | 258 ++++++++++++++++++ 1 file changed, 258 insertions(+) create mode 100644 week7/community_contributions/price_prediction_with_RAG/testing_fine_tuned_model_with_rag.py diff --git a/week7/community_contributions/price_prediction_with_RAG/testing_fine_tuned_model_with_rag.py b/week7/community_contributions/price_prediction_with_RAG/testing_fine_tuned_model_with_rag.py new file mode 100644 index 0000000..22c775d --- /dev/null +++ b/week7/community_contributions/price_prediction_with_RAG/testing_fine_tuned_model_with_rag.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- +"""Testing Fine-tuned model with RAG + +Automatically generated by Colab. 
+ +Original file is located at + https://colab.research.google.com/drive/1J8P8cwqwhBo3CNIZaEFe6BMRw0WUfEqy + +## Predict Product Prices + +### And now, to evaluate our fine-tuned open source model +""" + +!pip install -q datasets peft requests torch bitsandbytes transformers trl accelerate sentencepiece matplotlib langchain-community chromadb + +import os +import re +import math + +from google.colab import userdata + +from huggingface_hub import login + +import torch +import torch.nn.functional as F + +from transformers import ( + AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, GenerationConfig) + +from datasets import load_dataset + +from peft import PeftModel + +from sentence_transformers import SentenceTransformer +from langchain.vectorstores import Chroma +from langchain.embeddings import HuggingFaceEmbeddings + +import matplotlib.pyplot as plt + +# Commented out IPython magic to ensure Python compatibility. +# Constants + +BASE_MODEL = "meta-llama/Llama-3.1-8B" +PROJECT_NAME = "pricer" +HF_USER = "Adriana213" + +RUN_NAME = "optim-20250514_061529" +PROJECT_RUN_NAME = f"{PROJECT_NAME}-{RUN_NAME}" + +FINETUNED_MODEL = f"{HF_USER}/{PROJECT_RUN_NAME}" + +# Data + +DATASET_NAME = f"{HF_USER}/pricer-data" + +# Hyperparameters for QLoRA + +QUANT_4_BIT = True + +# %matplotlib inline + +# Used for writing to output in color + +GREEN = "\033[92m" +YELLOW = "\033[93m" +RED = "\033[91m" +RESET = "\033[0m" +COLOR_MAP = {"red":RED, "orange": YELLOW, "green": GREEN} + +"""### Log in to HuggingFace + + +""" + +hf_token = userdata.get('HF_TOKEN') +login(hf_token, add_to_git_credential=True) + +dataset = load_dataset(DATASET_NAME) +train = dataset['train'] +test = dataset['test'] + +test[0] + +"""## Now load the Tokenizer and Model""" + +if QUANT_4_BIT: + quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + bnb_4bit_quant_type="nf4" + ) +else: + quant_config = BitsAndBytesConfig( + load_in_8bit=True, + bnb_8bit_compute_dtype=torch.bfloat16 + ) + +# Load the Tokenizer and the Model + +tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True) +tokenizer.pad_token = tokenizer.eos_token +tokenizer.padding_side = "right" + +base_model = AutoModelForCausalLM.from_pretrained( + BASE_MODEL, + quantization_config=quant_config, + device_map="auto", +) +base_model.generation_config.pad_token_id = tokenizer.pad_token_id + +# Load the fine-tuned model with PEFT + +fine_tuned_model = PeftModel.from_pretrained(base_model, FINETUNED_MODEL) + + +print(f"Memory footprint: {fine_tuned_model.get_memory_footprint() / 1e6:.1f} MB") + +fine_tuned_model + +"""# Evaluation""" + +def extract_price(s): + if "Price is $" in s: + contents = s.split("Price is $")[1] + contents = contents.replace(',','') + match = re.search(r"[-+]?\d*\.\d+|\d+", contents) + return float(match.group()) if match else 0 + return 0 + +extract_price("Price is $a fabulous 899.99 or so") + +# Original prediction function takes the most likely next token + +def model_predict(prompt): + inputs = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + attention_mask = torch.ones(inputs.shape, device="cuda") + outputs = fine_tuned_model.generate(inputs, attention_mask=attention_mask, max_new_tokens=3, num_return_sequences=1) + response = tokenizer.decode(outputs[0]) + return extract_price(response) + +# top_K = 3 + +# def improved_model_predict(prompt, device="cuda"): +# set_seed(42) +# inputs = tokenizer.encode(prompt, 
return_tensors="pt").to(device) +# attention_mask = torch.ones(inputs.shape, device=device) + +# with torch.no_grad(): +# outputs = fine_tuned_model(inputs, attention_mask=attention_mask) +# next_token_logits = outputs.logits[:, -1, :].to('cpu') + +# next_token_probs = F.softmax(next_token_logits, dim=-1) +# top_prob, top_token_id = next_token_probs.topk(top_K) +# prices, weights = [], [] +# for i in range(top_K): +# predicted_token = tokenizer.decode(top_token_id[0][i]) +# probability = top_prob[0][i] +# try: +# result = float(predicted_token) +# except ValueError as e: +# result = 0.0 +# if result > 0: +# prices.append(result) +# weights.append(probability) +# if not prices: +# return 0.0, 0.0 +# total = sum(weights) +# weighted_prices = [price * weight / total for price, weight in zip(prices, weights)] +# return sum(weighted_prices).item() + +embedder = HuggingFaceEmbeddings(model_name = "all-MiniLM-L6-v2") +chroma = Chroma( + persist_directory = "chroma_train_index", + embedding_function = embedder +) + +gen_config = GenerationConfig(max_new_tokens=10, do_sample=False) + +def predict_price_rag(desc: str, k: int = 3) -> float: + docs = chroma.similarity_search(desc, k=k) + shots = "\n\n".join(f"Description: {d.page_content}\nPrice is ${d.metadata['price']}" + for d in docs) + prompt = f"{shots}\n\nDescription: {desc}\nPrice is $" + inp = tokenizer(prompt, return_tensors="pt").to(fine_tuned_model.device) + out = fine_tuned_model.generate(**inp, generation_config=gen_config) + txt = tokenizer.decode(out[0, inp["input_ids"].shape[-1]:], skip_special_tokens=True).strip() + return float(re.findall(r"\d+\.?\d+", txt)[0]) + +class Tester: + + def __init__(self, predictor, data, title=None, size=250): + self.predictor = predictor + self.data = data + self.title = title or predictor.__name__.replace("_", " ").title() + self.size = size + self.guesses = [] + self.truths = [] + self.errors = [] + self.sles = [] + self.colors = [] + + def color_for(self, error, truth): + if error<40 or error/truth < 0.2: + return "green" + elif error<80 or error/truth < 0.4: + return "orange" + else: + return "red" + + def run_datapoint(self, i): + datapoint = self.data[i] + guess = self.predictor(datapoint["text"]) + truth = datapoint["price"] + error = abs(guess - truth) + log_error = math.log(truth+1) - math.log(guess+1) + sle = log_error ** 2 + color = self.color_for(error, truth) + title = datapoint["text"].split("\n\n")[1][:20] + "..." 
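+        # Collect per-item results; chart() and report() read these lists once the run finishes.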
+ self.guesses.append(guess) + self.truths.append(truth) + self.errors.append(error) + self.sles.append(sle) + self.colors.append(color) + print(f"{COLOR_MAP[color]}{i+1}: Guess: ${guess:,.2f} Truth: ${truth:,.2f} Error: ${error:,.2f} SLE: {sle:,.2f} Item: {title}{RESET}") + + def chart(self, title): + max_error = max(self.errors) + plt.figure(figsize=(12, 8)) + max_val = max(max(self.truths), max(self.guesses)) + plt.plot([0, max_val], [0, max_val], color='deepskyblue', lw=2, alpha=0.6) + plt.scatter(self.truths, self.guesses, s=3, c=self.colors) + plt.xlabel('Ground Truth') + plt.ylabel('Model Estimate') + plt.xlim(0, max_val) + plt.ylim(0, max_val) + plt.title(title) + plt.show() + + def report(self): + average_error = sum(self.errors) / self.size + rmsle = math.sqrt(sum(self.sles) / self.size) + hits = sum(1 for color in self.colors if color=="green") + title = f"{self.title} Error=${average_error:,.2f} RMSLE={rmsle:,.2f} Hits={hits/self.size*100:.1f}%" + self.chart(title) + + def run(self): + self.error = 0 + for i in range(self.size): + self.run_datapoint(i) + self.report() + + @classmethod + def test(cls, function, data): + cls(function, data).run() + +Tester.test(predict_price_rag, test) \ No newline at end of file From bfc20be33cd9b425b82986d419d5f6c00cd221a2 Mon Sep 17 00:00:00 2001 From: Adriana394 <158718290+Adriana394@users.noreply.github.com> Date: Tue, 3 Jun 2025 15:33:14 +0200 Subject: [PATCH 18/23] Create new_training_with_rag (1).py --- .../new_training_with_rag (1).py | 262 ++++++++++++++++++ 1 file changed, 262 insertions(+) create mode 100644 week7/community_contributions/price_prediction_with_RAG/new_training_with_rag (1).py diff --git a/week7/community_contributions/price_prediction_with_RAG/new_training_with_rag (1).py b/week7/community_contributions/price_prediction_with_RAG/new_training_with_rag (1).py new file mode 100644 index 0000000..49feb73 --- /dev/null +++ b/week7/community_contributions/price_prediction_with_RAG/new_training_with_rag (1).py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +"""new_training_with_RAG.ipynb + +Automatically generated by Colab. + +Original file is located at + https://colab.research.google.com/drive/1gi8FPI1dtnxBNTf86JdmXQ0BYqnKz7LS + +# Predict Product Prices +""" + +!nvidia-smi + +!pip install -q datasets requests torch peft bitsandbytes transformers trl accelerate sentencepiece matplotlib langchain-community chromadb + +import os +import re +import math +from tqdm import tqdm +from google.colab import userdata +from huggingface_hub import login +import torch +import transformers +from transformers import ( + AutoModelForCausalLM, AutoTokenizer, TrainingArguments, + set_seed, BitsAndBytesConfig, GenerationConfig) + +from datasets import load_dataset +from peft import LoraConfig, PeftModel +from trl import SFTTrainer, SFTConfig +from datetime import datetime +import matplotlib.pyplot as plt + +#LangChain & RAG Imports + +from sentence_transformers import SentenceTransformer +from langchain.schema import Document +from langchain.vectorstores import Chroma +import chromadb +from langchain.embeddings import HuggingFaceEmbeddings + +# Commented out IPython magic to ensure Python compatibility. 
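+
+# Overall flow: fine-tune the base model with QLoRA on the pricer dataset, then build a Chroma index
+# over the training descriptions so that, at inference, the k most similar priced examples can be
+# prepended to each prompt as few-shot context before asking for a price.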
+# Constants + +BASE_MODEL = "meta-llama/Meta-Llama-3.1-8B" +#BASE_MODEL = 'mistralai/Mistral-7B-Instruct-v0.1' +PROJECT_NAME = "pricer-optim" +HF_USER = "Adriana213" + +# Data + +DATASET_NAME = f"{HF_USER}/pricer-data" +MAX_SEQUENCE_LENGTH = 182 + + +RUN_NAME = f"{PROJECT_NAME}-{datetime.now():%Y%m%d_%H%M%S}" + +HUB_MODEL_NAME = f"{HF_USER}/{RUN_NAME}" + +# Hyperparameters for QLoRA + +LORA_R = 8 +LORA_ALPHA = 32 +TARGET_MODULES = ["q_proj", "v_proj", "k_proj", "o_proj"] +LORA_DROPOUT = 0.10 +QUANT_4_BIT = True + +# Hyperparameters for Training + +EPOCHS = 2 +BATCH_SIZE = 16 +GRADIENT_ACCUMULATION_STEPS = 1 +LEARNING_RATE = 2e-4 +LR_SCHEDULER_TYPE = 'cosine' +WARMUP_RATIO = 0.05 +OPTIMIZER = "paged_adamw_32bit" +STEPS = 50 +SAVE_STEPS = 200 +EVAL_STEPS = 200 # kept for potential future use + +# %matplotlib inline + +HUB_MODEL_NAME + +"""### Log in to HuggingFace & get Data""" + +hf_token = userdata.get('HF_TOKEN') +login(hf_token, add_to_git_credential=True) + +torch.cuda.empty_cache() + +dataset = load_dataset(DATASET_NAME) +train = dataset['train'] +test = dataset['test'] + +"""## Now load the Tokenizer and Model + +The model is "quantized" - we are reducing the precision to 4 bits. +""" + +# Pick the right quantization + +if QUANT_4_BIT: + quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + bnb_4bit_quant_type="nf4" + ) +else: + quant_config = BitsAndBytesConfig( + load_in_8bit=True, + bnb_8bit_compute_dtype=torch.bfloat16 + ) + +# Load the Tokenizer and the Model + +tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True) +tokenizer.pad_token = tokenizer.eos_token +tokenizer.padding_side = "right" + +base_model = AutoModelForCausalLM.from_pretrained( + BASE_MODEL, + quantization_config=quant_config, + device_map="auto", +) + +base_model.generation_config.pad_token_id = tokenizer.pad_token_id + +print(f"Memory footprint: {base_model.get_memory_footprint() / 1e6:.1f} MB") + +"""# Data Collator + +""" + +from trl import DataCollatorForCompletionOnlyLM + +response_template = "Price is $" +collator = DataCollatorForCompletionOnlyLM(response_template, + tokenizer=tokenizer) + +"""# Set up the configuration for Training""" + +# LoRA Config + +lora_parameters = LoraConfig( + lora_alpha = LORA_ALPHA, + lora_dropout = LORA_DROPOUT, + r = LORA_R, + bias = "none", + task_type = "CAUSAL_LM", + target_modules = TARGET_MODULES, +) + +# Training Config + +train_parameters = SFTConfig( + output_dir = RUN_NAME, + num_train_epochs = EPOCHS, + per_device_train_batch_size = BATCH_SIZE, + per_device_eval_batch_size = 4, + eval_strategy = "no", + eval_steps = EVAL_STEPS, + gradient_accumulation_steps = GRADIENT_ACCUMULATION_STEPS, + optim = OPTIMIZER, + save_steps = SAVE_STEPS, + save_total_limit = 5, + logging_steps = 50, + learning_rate = LEARNING_RATE, + weight_decay = 0.01, + fp16=False, + bf16=True, + max_grad_norm=0.3, + max_steps=-1, + warmup_ratio = WARMUP_RATIO, + group_by_length=True, + lr_scheduler_type = LR_SCHEDULER_TYPE, + run_name = RUN_NAME, + max_seq_length = MAX_SEQUENCE_LENGTH, + dataset_text_field = "text", + save_strategy = "steps", + hub_strategy = "every_save", + push_to_hub = True, + hub_model_id = HUB_MODEL_NAME, + hub_private_repo = True, + report_to = 'none', +) + + +fine_tuning = SFTTrainer( + model = base_model, + train_dataset = train, + eval_dataset=test, + peft_config = lora_parameters, + args = train_parameters, + data_collator = collator, + ) + +"""## Fine Tuning""" + 
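+# DataCollatorForCompletionOnlyLM (defined above) masks every token up to the "Price is $" template,
+# so the training loss below is computed only on the price tokens, not on the product description.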
+fine_tuning.train() + +fine_tuning.model.push_to_hub(RUN_NAME, private=True) +print(f"Saved to the hub: {RUN_NAME}") + +"""# Implement RAG""" + +HF_USER = "Adriana213" +RUN_NAME = "pricer-optim-20250514_061529" +fine_tuned_model = PeftModel.from_pretrained(base_model, f"{HF_USER}/{RUN_NAME}") +print(f"✅ Loaded fine-tuned adapter: {HF_USER}/{RUN_NAME}") + +base_model = fine_tuned_model + +"""## Build Chroma index""" + +docs = [ + Document(page_content=text, metadata = {'price': price}) + for text, price in zip(train['text'], train['price']) +] + +# Create embeddings & persist Chroma index + +embedding = HuggingFaceEmbeddings(model_name = 'all-MiniLM-L6-v2') +chroma = Chroma.from_documents( + documents = docs, + embedding = embedding, + persist_directory = 'chroma_train_index' +) + +chroma.persist() +print('Chroma index built and persisted.') + +"""## RAG Prediction Function""" + +generation_config = GenerationConfig( + max_new_token = 10, + do_sample = False, + temperature = 0.1 +) + +def predict_price_rag(desc: str, k: int = 3) -> float: + hits = chroma.similarity_search(desc, k = k) + shot_strs = [ + f'Description: {doc.page_content}\nPrice is ${doc.metadata["price"]}' + for doc in hits + ] + + prompt = "\n\n".join(shot_strs) + f"\n\nDescription: {desc}\nPrice is $" + + inputs = tokenizer(prompt, return_tensors="pt").to(base_model.device) + out = base_model.generate(**inputs, generation_config=generation_config) + text = tokenizer.decode( + out[0, inputs["input_ids"].shape[-1]:], + skip_special_tokens=True + ).strip() + return float(re.findall(r"\d+\.?\d+", text)[0]) + +!zip -r chroma_index.zip chroma_train_index + +from google.colab import files +files.download("chroma_index.zip") \ No newline at end of file From fdd5ad44cf469610da3493d20bad356b29a6485c Mon Sep 17 00:00:00 2001 From: renannovais Date: Wed, 4 Jun 2025 10:20:03 -0300 Subject: [PATCH 19/23] Add my notebook to community-contributions --- ...y1-webpage-summarizer-brazilian-news.ipynb | 247 ++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 week1/community-contributions/day1-webpage-summarizer-brazilian-news.ipynb diff --git a/week1/community-contributions/day1-webpage-summarizer-brazilian-news.ipynb b/week1/community-contributions/day1-webpage-summarizer-brazilian-news.ipynb new file mode 100644 index 0000000..e108977 --- /dev/null +++ b/week1/community-contributions/day1-webpage-summarizer-brazilian-news.ipynb @@ -0,0 +1,247 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 15, + "id": "8ce13728-0040-43cc-82cd-e10c838ef71c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🌍 Detected language: PT\n", + "🔗 Preview of extracted text:\n", + "\n", + "ITASAT2 irá atuar para aplicações científicas e de defesa\n", + "Publicado em 14/04/2025 - 14h15\n", + "O Instituto Tecnológico de Aeronáutica (ITA) realizou, entre os dias 17 e 19 de março, a Revisão Preliminar de Projeto (PDR) do ITASAT 2, novo microssatélite em desenvolvimento por pesquisadores do Centro Espacial ITA (CEI). 
A atividade representa uma etapa importante dos estudos e contou com a presença de instituições parceiras, tanto do Brasil quanto do exterior.\n", + "Participaram do encontro representantes do\n", + "...\n", + "\n", + "Amount of words: 526\n", + "\n", + "\n", + "📊 Usage Report\n", + "🧾 Prompt tokens: 927\n", + "🧠 Completion tokens: 309\n", + "🔢 Total tokens: 1236\n", + "💰 Total cost: $0.000927\n", + "\n", + "\n", + "\n" + ] + }, + { + "data": { + "text/markdown": [ + "# 📝 Summary\n", + "\n", + "The ITA (Instituto Tecnológico de Aeronáutica) is working on the ITASAT 2 project, a new microsatellite geared towards scientific and defense applications! 🌟 This initiative was highlighted at the Preliminary Design Review (PDR) held from March 17 to 19, with participation from notable organizations such as NASA and the Brazilian Space Agency (AEB). This is a fantastic collaboration that spans both domestic and international partnerships – how exciting is that? \n", + "\n", + "ITASAT 2 will consist of a constellation of three CubeSats focusing on monitoring the Earth's ionosphere and assessing plasma bubble formation. Interestingly, it also has defense applications such as geolocating radio frequency sources and optical identification of uncooperative vessels – a crucial capability for maritime security!\n", + "\n", + "The PDR showcased the team's technical and managerial capabilities, receiving unanimous approval to proceed with the project. It’s great to see such thorough preparation reflecting the dedication of the ITA team! \n", + "\n", + "The CubeSats themselves are cubic nano or microsatellites, and the ITASAT 2 is of the 16U variety, meaning it's made up of 16 units measuring 10 cm each – just amazing how compact these technologies can be! Additionally, the CEI is also developing another CubeSat called SelenITA, which will contribute to NASA's Artemis mission to study the Moon! 🌕\n", + "\n", + "Keep an eye on this remarkable project as it continues to develop – the future of space exploration and defense technology looks bright! 🚀" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Import Libraries\n", + "import os\n", + "import requests\n", + "from openai import OpenAI\n", + "\n", + "from bs4 import BeautifulSoup\n", + "from langdetect import detect, LangDetectException\n", + "from dotenv import load_dotenv\n", + "\n", + "from IPython.display import Markdown, display\n", + "\n", + "# Load .env variables\n", + "load_dotenv()\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "if not openai_api_key:\n", + " raise ValueError(\"⚠️ OPENAI_API_KEY not found in .env file.\")\n", + "\n", + "# Generating object to work with GPT tasks \n", + "openai = OpenAI()\n", + "\n", + "# Class to work with text extraction, processing and summarizing from a given url\n", + "class WebPageSummarizer():\n", + " \"\"\"\n", + " Class to work with text extraction, processing and summarizing from a given url using the BeautifulSoup library. 
It also includes pricing.\n", + " \"\"\"\n", + " def __init__(self, url: str, summary_detail: str = \"high\", show_summary: bool = True, language_of_reference = \"English\", model: str = \"gpt-4o-mini\") -> None:\n", + "\n", + " # Initial summarizer settings\n", + " self.url = url\n", + " self.model = model\n", + " self.show_summary = show_summary\n", + " self.summary_detail = summary_detail\n", + " self.language_of_reference = language_of_reference\n", + " self.language_code_map = {\n", + " \"english\": \"en\",\n", + " \"portuguese\": \"pt\",\n", + " \"spanish\": \"es\",\n", + " \"french\": \"fr\",\n", + " \"german\": \"de\",\n", + " \"italian\": \"it\",\n", + " \"japanese\": \"ja\",\n", + " \"chinese\": \"zh\",\n", + " \"korean\": \"ko\",\n", + " }\n", + " \n", + " self.model_pricing = {\n", + " \"gpt-4o-mini\": {\"input\": 0.0005, \"output\": 0.0015},\n", + " \"gpt-4o\": {\"input\": 0.005, \"output\": 0.015},\n", + " \"gpt-4-turbo\": {\"input\": 0.01, \"output\": 0.03},\n", + " \"gpt-4\": {\"input\": 0.03, \"output\": 0.06}, # Rarely used now\n", + " \"gpt-3.5-turbo\": {\"input\": 0.0005, \"output\": 0.0015}\n", + " }\n", + "\n", + " self.headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n", + " \"(KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36\"\n", + " }\n", + "\n", + " if self.summary_detail not in [\"high\", \"low\"]:\n", + " raise Exception(\"\"\"Please select summary detail as either \"high\" or \"low\".\"\"\")\n", + "\n", + " def __extract_text(self):\n", + " response = requests.get(self.url, headers=self.headers)\n", + " if response.status_code != 200:\n", + " raise Exception(f\"Failed to fetch page. Status code: {response.status_code}\")\n", + " \n", + " soup = BeautifulSoup(response.text, \"html.parser\")\n", + " \n", + " # Try to extract meaningful content\n", + " paragraphs = soup.find_all(\"p\")\n", + " \n", + " # Join all paragraph text\n", + " self.text = \"\\n\".join([p.get_text() for p in paragraphs if p.get_text().strip() != \"\"])\n", + "\n", + " # Guarantee limit of text to summary\n", + " max_words = 7000\n", + " if len(self.text.split()) > max_words:\n", + " self.text = \" \".join(self.text.split()[:max_words])\n", + " \n", + " def __detect_language(self):\n", + " # Detect language\n", + " try:\n", + " self.language_url = detect(self.text)\n", + " except LangDetectException:\n", + " self.language_url = \"unknown\"\n", + "\n", + " # Normalize and resolve target language code\n", + " target_language_name = self.language_of_reference.lower().strip()\n", + " self.target_language_code = self.language_code_map.get(target_language_name)\n", + " \n", + " if not self.target_language_code:\n", + " raise ValueError(f\"❌ Unsupported language: {self.language_of_reference}. Please use one of: {list(LANGUAGE_CODE_MAP.keys())}\")\n", + "\n", + " print(f\"🌍 Detected language: {self.language_url.upper()}\")\n", + " \n", + " if self.show_summary:\n", + " print(\"🔗 Preview of extracted text:\\n\")\n", + " print(self.text[:500] + \"\\n...\\n\")\n", + " print(f\"Amount of words: {len(self.text.split())}\\n\")\n", + "\n", + " def __calculate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:\n", + " \"\"\"\n", + " Calculates total cost in USD based on selected model.\n", + " \"\"\"\n", + " pricing = self.model_pricing.get(self.model)\n", + " if pricing is None:\n", + " raise ValueError(f\"\"\"Pricing not available for model \"{self.model}\". 
Add it to model_pricing.\"\"\")\n", + " \n", + " input_cost = (prompt_tokens / 1000) * pricing[\"input\"]\n", + " output_cost = (completion_tokens / 1000) * pricing[\"output\"]\n", + " return input_cost + output_cost\n", + "\n", + " def summarize(self)-> str:\n", + " \"\"\"\n", + " Method to process user prompts in the context of the user.\n", + " \"\"\"\n", + " self.__extract_text()\n", + " self.__detect_language()\n", + " \n", + " # Prompt for system definition\n", + " self.system_prompt = f\"\"\" \n", + " You are an assistant that analyzes the contents of a website and provides a summary. \n", + " Please notice that providing a {self.summary_detail} summary detail is IMPORTANT.\n", + " If you find text that might be navigation related or ad related please ignore. Respond in markdown. \n", + " Also, can you please start your summary with the tile \"📝 Summary\"?\n", + " \n", + " Please show some excited behavior during your summary, making comments with extra knowledge if possible during or at the end of the sentence. \n", + " \"\"\"\n", + "\n", + " self.content = f\"\"\"The text to summarize is as follows: {self.text}\"\"\"\n", + "\n", + " if self.language_url != self.target_language_code:\n", + " self.system_prompt = f\"\"\"The website content is in {self.language_url.upper()}. Please first translate it to {self.language_of_reference}. \n", + " {self.system_prompt.strip()}\n", + " \"\"\"\n", + "\n", + " response = openai.chat.completions.create(model=self.model, messages=[{\"role\":\"system\", \"content\":self.system_prompt}, \n", + " {\"role\": \"user\", \"content\":self.content}])\n", + "\n", + " # Cost calculation and usage report\n", + " usage = response.usage\n", + " total_cost = self.__calculate_cost(usage.prompt_tokens, usage.completion_tokens)\n", + " \n", + " print(\"\\n📊 Usage Report\")\n", + " print(f\"🧾 Prompt tokens: {usage.prompt_tokens}\")\n", + " print(f\"🧠 Completion tokens: {usage.completion_tokens}\")\n", + " print(f\"🔢 Total tokens: {usage.total_tokens}\")\n", + " print(f\"💰 Total cost: ${total_cost:.6f}\\n\\n\\n\")\n", + "\n", + " return response.choices[0].message.content\n", + "\n", + "\n", + "web_page_summarizer = WebPageSummarizer(\"http://www.ita.br/noticias/revisodeprojetodonovomicrossatlitedoitaaprovada\", summary_detail = \"low\")\n", + "display(Markdown(web_page_summarizer.summarize()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af5a186a-bb25-4cf4-a6d2-6034cd493bc4", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From ba1b3b702f44ef66555017767167845bdeecbe65 Mon Sep 17 00:00:00 2001 From: lisekarimi Date: Thu, 5 Jun 2025 16:20:51 +0200 Subject: [PATCH 20/23] Add week1 contributions --- .../01_webpage_summarizer.ipynb | 357 +++++++++++++++++ .../02_brochure_generator.ipynb | 370 ++++++++++++++++++ .../03_tech_explainer.ipynb | 142 +++++++ 3 files changed, 869 insertions(+) create mode 100644 week1/community-contributions/01_webpage_summarizer.ipynb create mode 100644 week1/community-contributions/02_brochure_generator.ipynb create mode 100644 week1/community-contributions/03_tech_explainer.ipynb 
diff --git a/week1/community-contributions/01_webpage_summarizer.ipynb b/week1/community-contributions/01_webpage_summarizer.ipynb new file mode 100644 index 0000000..f8be204 --- /dev/null +++ b/week1/community-contributions/01_webpage_summarizer.ipynb @@ -0,0 +1,357 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "53211323-6a09-452a-b471-98e22d92bfc2", + "metadata": {}, + "source": [ + "# 🌐 WebPage Summarizer\n", + "---\n", + "- 🌍 **Task:** Summarizing webpage content using AI. \n", + "- 🧠 **Model:** OpenAI's ``gpt-4o-mini`` and ``llama3.2:3b`` for text summarization. \n", + "- 🕵️‍♂️ **Data Extraction:** Selenium for handling both static and JavaScript-rendered websites. \n", + "- 📌 **Output Format:** Markdown-formatted summaries. \n", + "- 🔗 **Scope:** Processes only the given webpage URL (not the entire site). \n", + "- 🚀 **Tools:** Python, Requests, Selenium, BeautifulSoup, OpenAI API, Ollama. \n", + "- 🧑‍💻 **Skill Level:** Beginner.\n", + "\n", + "🛠️ Requirements\n", + "- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n", + "- 🔑 OpenAI API Key (for GPT model)\n", + "- Install Ollama and pull llama3.2:3b or another lightweight model\n", + "- Google Chrome browser installed\n", + "\n", + "**✨ This script handles both JavaScript and non-JavaScript websites using Selenium with Chrome WebDriver for reliable content extraction from modern web applications.**\n", + "\n", + "Let's get started and automate website summarization! 🚀\n", + "\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/01_basic_llm_project.jpg?raw=true)\n", + "\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "markdown", + "id": "d70aa4b0", + "metadata": {}, + "source": [ + "## 🛠️ Environment Setup & Dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ebf2fa36", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install selenium webdriver-manager" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1dcf1d9d-c540-4900-b14e-ad36a28fc822", + "metadata": {}, + "outputs": [], + "source": [ + "# ===========================\n", + "# System & Environment\n", + "# ===========================\n", + "import os\n", + "from dotenv import load_dotenv\n", + "\n", + "# ===========================\n", + "# Web Scraping\n", + "# ===========================\n", + "import time\n", + "from bs4 import BeautifulSoup\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.support.ui import WebDriverWait\n", + "from selenium.webdriver.support import expected_conditions as EC\n", + "\n", + "# ===========================\n", + "# AI-related\n", + "# ===========================\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "import ollama" + ] + }, + { + "cell_type": "markdown", + "id": "cc20642b", + "metadata": {}, + "source": [ + "## 🔐 Model Configuration & Authentication" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8598c299-05ca-492e-b085-6bcc2f7dda0d", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if not api_key:\n", + " raise ValueError(\"OPENAI_API_KEY not found in environment variables\")\n", + "\n", + "print(\"✅ API key loaded successfully!\")\n", + "openai = OpenAI()" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8098defb", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL_OPENAI = \"gpt-4o-mini\"\n", + "MODEL_OLLAMA = \"llama3.2:3b\"" + ] + }, + { + "cell_type": "markdown", + "id": "2bd1d83f", + "metadata": {}, + "source": [ + "## 🌐 Web Scraping Infrastructure" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6fe5114", + "metadata": {}, + "outputs": [], + "source": [ + "class WebsiteCrawler:\n", + " def __init__(self, url):\n", + " self.url = url\n", + " self.title = \"\"\n", + " self.text = \"\"\n", + " self.scrape()\n", + "\n", + " def scrape(self):\n", + " try:\n", + " # Chrome options\n", + " chrome_options = Options()\n", + " chrome_options.add_argument(\"--headless\")\n", + " chrome_options.add_argument(\"--no-sandbox\")\n", + " chrome_options.add_argument(\"--disable-dev-shm-usage\")\n", + " chrome_options.add_argument(\"--disable-gpu\")\n", + " chrome_options.add_argument(\"--window-size=1920,1080\")\n", + " chrome_options.add_argument(\"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36\")\n", + "\n", + " # Try to find Chrome\n", + " chrome_paths = [\n", + " r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\",\n", + " r\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\",\n", + " r\"C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe\".format(os.getenv('USERNAME')),\n", + " ]\n", + "\n", + " chrome_binary = None\n", + " for path in chrome_paths:\n", + " if os.path.exists(path):\n", + " chrome_binary = path\n", + " break\n", + "\n", + " if chrome_binary:\n", + " chrome_options.binary_location = chrome_binary\n", + "\n", + " # Create driver\n", + " driver = webdriver.Chrome(options=chrome_options)\n", + " driver.set_page_load_timeout(30)\n", + "\n", + " print(f\"🔍 Loading: {self.url}\")\n", + " driver.get(self.url)\n", + "\n", + " # Wait for page to load\n", + " time.sleep(5)\n", + "\n", + " # Try to wait for main content\n", + " try:\n", + " WebDriverWait(driver, 10).until(\n", + " EC.presence_of_element_located((By.TAG_NAME, \"main\"))\n", + " )\n", + " except Exception:\n", + " try:\n", + " WebDriverWait(driver, 10).until(\n", + " EC.presence_of_element_located((By.TAG_NAME, \"body\"))\n", + " )\n", + " except Exception:\n", + " pass # Continue anyway\n", + "\n", + " # Get title and page source\n", + " self.title = driver.title\n", + " page_source = driver.page_source\n", + " driver.quit()\n", + "\n", + " print(f\"✅ Page loaded: {self.title}\")\n", + "\n", + " # Parse with BeautifulSoup\n", + " soup = BeautifulSoup(page_source, 'html.parser')\n", + "\n", + " # Remove unwanted elements\n", + " for element in soup([\"script\", \"style\", \"img\", \"input\", \"button\", \"nav\", \"footer\", \"header\"]):\n", + " element.decompose()\n", + "\n", + " # Get main content\n", + " main = soup.find('main') or soup.find('article') or soup.find('.content') or soup.find('body')\n", + " if main:\n", + " self.text = main.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = soup.get_text(separator=\"\\n\", strip=True)\n", + "\n", + " # Clean up text\n", + " lines = [line.strip() for line in self.text.split('\\n') if line.strip() and len(line.strip()) > 2]\n", + " self.text = '\\n'.join(lines[:200]) # Limit to first 200 lines\n", + "\n", + " print(f\"📄 Extracted {len(self.text)} characters\")\n", + "\n", + " except Exception as e:\n", + " print(f\"❌ Error 
occurred: {e}\")\n", + " self.title = \"Error occurred\"\n", + " self.text = \"Could not scrape website content\"" + ] + }, + { + "cell_type": "markdown", + "id": "d727feff", + "metadata": {}, + "source": [ + "## 🧠 Prompt Engineering & Templates" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02e3a673-a8a1-4101-a441-3816f7ab9e4d", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86bb80f9-9e7c-4825-985f-9b83fe50839f", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89998b18-77aa-4aaf-a137-f0d078d61f75", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "id": "cde36d4f", + "metadata": {}, + "source": [ + "## 📝 Summarization " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5636affe", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize_gpt(url):\n", + " \"\"\"Scrape website and summarize with GPT\"\"\"\n", + " site = WebsiteCrawler(url)\n", + "\n", + " if \"Error occurred\" in site.title or len(site.text) < 50:\n", + " print(f\"❌ Failed to scrape meaningful content from {url}\")\n", + " return\n", + "\n", + " print(\"🤖 Creating summary...\")\n", + "\n", + " # Create summary\n", + " response = openai.chat.completions.create(\n", + " model=MODEL_OPENAI,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(site)}\n", + " ]\n", + " )\n", + "\n", + " web_summary = response.choices[0].message.content\n", + " display(Markdown(web_summary))\n", + "\n", + "summarize_gpt('https://openai.com')\n", + "# summarize_gpt('https://stripe.com')\n", + "# summarize_gpt('https://vercel.com')\n", + "# summarize_gpt('https://react.dev')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90b9a8f8-0c1c-40c8-a4b3-e8e1fcd29df5", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize_ollama(url):\n", + " website = WebsiteCrawler(url)\n", + " response = ollama.chat(\n", + " model=MODEL_OLLAMA,\n", + " messages=messages_for(website))\n", + " display(Markdown(response['message']['content'])) # Generate and display output\n", + "\n", + "summarize_ollama('https://github.com')\n", + "# summarize_ollama('https://nextjs.org')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + 
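One detail in `WebsiteCrawler.scrape` worth knowing: `soup.find('.content')` searches for a tag literally named `.content`, so that fallback never matches a `class="content"` container and the chain simply falls through to `body`. CSS selectors go through `select_one`. Below is a small sketch of the same main-content extraction with that fallback working; the function name and the 200-line cap are illustrative only.

```python
from bs4 import BeautifulSoup

def extract_main_text(page_source: str, max_lines: int = 200) -> str:
    """Strip page chrome, pick the most likely content container, return cleaned text."""
    soup = BeautifulSoup(page_source, "html.parser")
    for tag in soup(["script", "style", "img", "input", "button", "nav", "footer", "header"]):
        tag.decompose()

    # select_one() understands CSS selectors, so ".content" matches class="content";
    # find(".content") would look for a tag *named* ".content" and always return None.
    main = (soup.find("main") or soup.find("article")
            or soup.select_one(".content") or soup.find("body") or soup)

    text = main.get_text(separator="\n", strip=True)
    lines = [line.strip() for line in text.split("\n") if len(line.strip()) > 2]
    return "\n".join(lines[:max_lines])
```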
"nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/02_brochure_generator.ipynb b/week1/community-contributions/02_brochure_generator.ipynb new file mode 100644 index 0000000..5b81824 --- /dev/null +++ b/week1/community-contributions/02_brochure_generator.ipynb @@ -0,0 +1,370 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "dc8af57c-23a9-452e-9fc3-0e5027edda14", + "metadata": {}, + "source": [ + "# AI-powered Brochure Generator\n", + "---\n", + "- 🌍 Task: Generate a company brochure using its name and website for clients, investors, and recruits.\n", + "- 🧠 Model: Toggle `USE_OPENAI` to switch between OpenAI and Ollama models\n", + "- 🕵️‍♂️ Data Extraction: Scraping website content and filtering key links (About, Products, Careers, Contact).\n", + "- 📌 Output Format: a Markdown-formatted brochure streamed in real-time.\n", + "- 🚀 Tools: BeautifulSoup, OpenAI API, and IPython display, ollama.\n", + "- 🧑‍💻 Skill Level: Intermediate.\n", + "\n", + "🛠️ Requirements\n", + "- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n", + "- 🔑 OpenAI API Key \n", + "- Install Ollama and pull llama3.2:3b or another lightweight model\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "markdown", + "id": "ec869f2c", + "metadata": {}, + "source": [ + "## 🧩 System Design Overview\n", + "\n", + "### Class Structure\n", + "\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/02_brochure_class_diagram.png?raw=true)\n", + "\n", + "This code consists of three main classes:\n", + "\n", + "1. **`Website`**: \n", + " - Scrapes and processes webpage content. \n", + " - Extracts **text** and **links** from a given URL. \n", + "\n", + "2. **`LLMClient`**: \n", + " - Handles interactions with **OpenAI or Ollama (`llama3`, `deepseek`, `qwen`)**. \n", + " - Uses `get_relevant_links()` to filter webpage links. \n", + " - Uses `generate_brochure()` to create and stream a Markdown-formatted brochure. \n", + "\n", + "3. **`BrochureGenerator`**: \n", + " - Uses `Website` to scrape the main webpage and relevant links. \n", + " - Uses `LLMClient` to filter relevant links and generate a brochure. \n", + " - Calls `generate()` to run the entire process.\n", + "\n", + "### Workflow\n", + "\n", + "1. **`main()`** initializes `BrochureGenerator` and calls `generate()`. \n", + "2. **`generate()`** calls **`LLMClient.get_relevant_links()`** to extract relevant links using **LLM (OpenAI/Ollama)**. \n", + "3. **`Website` scrapes the webpage**, extracting **text and links** from the given URL. \n", + "4. **Relevant links are re-scraped** using `Website` to collect additional content. \n", + "5. **All collected content is passed to `LLMClient.generate_brochure()`**. \n", + "6. **`LLMClient` streams the generated brochure** using **OpenAI or Ollama**. \n", + "7. **The final brochure is displayed in Markdown format.**\n", + "\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/02_brochure_process.png?raw=true)\n", + "\n", + "\n", + "### Intermediate reasoning\n", + "\n", + "In this workflow, we have intermediate reasoning because the LLM is called twice:\n", + "\n", + "1. **First LLM call**: Takes raw links → filters/selects relevant ones (reasoning step).\n", + "2. 
**Second LLM call**: Takes selected content → generates final brochure.\n", + "\n", + "🧠 **LLM output becomes LLM input** — that’s intermediate reasoning.\n", + "\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/02_llm_intermd_reasoning.png?raw=true)" + ] + }, + { + "cell_type": "markdown", + "id": "4b286461-35ee-4bc5-b07d-af554923e36d", + "metadata": {}, + "source": [ + "## 📦 Import Libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fe5670c-5146-474b-9e75-484210533f55", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "import json\n", + "import ollama\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import display, Markdown, update_display\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "markdown", + "id": "f3e23181-1e66-410d-a910-1fb4230f8088", + "metadata": {}, + "source": [ + "## 🧠 Define the Model\n", + "\n", + "The user can switch between OpenAI and Ollama by changing a single variable (`USE_OPENAI`). The model selection is dynamic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa2bd452-0cf4-4fec-9542-e1c86584c23f", + "metadata": {}, + "outputs": [], + "source": [ + "# Load API key\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "if not api_key or not api_key.startswith('sk-'):\n", + " raise ValueError(\"Invalid OpenAI API key. Check your .env file.\")\n", + "\n", + "# Define the model dynamically\n", + "USE_OPENAI = True # True to use openai and False to use Ollama\n", + "MODEL = 'gpt-4o-mini' if USE_OPENAI else 'llama3.2:3b'\n", + "\n", + "openai_client = OpenAI() if USE_OPENAI else None" + ] + }, + { + "cell_type": "markdown", + "id": "4fd997b7-1b89-4817-b53a-078164f5f71f", + "metadata": {}, + "source": [ + "## 🏗️ Define Classes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aed1af59-8b8f-4add-98dc-a9f1b5b511a5", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to scrape and process website content.\n", + " \"\"\"\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " self.text = self.extract_text(soup)\n", + " self.links = self.extract_links(soup)\n", + "\n", + " def extract_text(self, soup):\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " return soup.body.get_text(separator=\"\\n\", strip=True)\n", + " return \"\"\n", + "\n", + " def extract_links(self, soup):\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " return [link for link in links if link and 'http' in link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea04dc7e-ff4c-4113-83b7-0bddcf5072b9", + "metadata": {}, + "outputs": [], + "source": [ + "class LLMClient:\n", + " def __init__(self, model=MODEL):\n", + " self.model = model\n", + "\n", + " def get_relevant_links(self, website):\n", + " 
link_system_prompt = \"\"\"\n", + " You are given a list of links from a company website.\n", + " Select only relevant links for a brochure (About, Company, Careers, Products, Contact).\n", + " Exclude login, terms, privacy, and emails.\n", + "\n", + " ### **Instructions**\n", + " - Return **only valid JSON**.\n", + " - **Do not** include explanations, comments, or Markdown.\n", + " - Example output:\n", + " {\n", + " \"links\": [\n", + " {\"type\": \"about\", \"url\": \"https://company.com/about\"},\n", + " {\"type\": \"contact\", \"url\": \"https://company.com/contact\"},\n", + " {\"type\": \"product\", \"url\": \"https://company.com/products\"}\n", + " ]\n", + " }\n", + " \"\"\"\n", + "\n", + " user_prompt = f\"\"\"\n", + " Here is the list of links on the website of {website.url}:\n", + " Please identify the relevant web links for a company brochure. Respond in JSON format.\n", + " Do not include login, terms of service, privacy, or email links.\n", + " Links (some might be relative links):\n", + " {', '.join(website.links)}\n", + " \"\"\"\n", + "\n", + " if USE_OPENAI:\n", + " response = openai_client.chat.completions.create(\n", + " model=self.model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + " )\n", + " return json.loads(response.choices[0].message.content.strip())\n", + " else:\n", + " response = ollama.chat(\n", + " model=self.model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + " )\n", + " result = response.get(\"message\", {}).get(\"content\", \"\").strip()\n", + " try:\n", + " return json.loads(result) # Attempt to parse JSON\n", + " except json.JSONDecodeError:\n", + " print(\"Error: Response is not valid JSON\")\n", + " return {\"links\": []} # Return empty list if parsing fails\n", + "\n", + "\n", + " def generate_brochure(self, company_name, content, language):\n", + " system_prompt = \"\"\"\n", + " You are a professional translator and writer who creates fun and engaging brochures.\n", + " Your task is to read content from a company’s website and write a short, humorous, joky,\n", + " and entertaining brochure for potential customers, investors, and job seekers.\n", + " Include details about the company’s culture, customers, and career opportunities if available.\n", + " Respond in Markdown format.\n", + " \"\"\"\n", + "\n", + " user_prompt = f\"\"\"\n", + " Create a fun brochure for '{company_name}' using the following content:\n", + " {content[:5000]}\n", + " Respond in {language} only, and format your response correctly in Markdown.\n", + " Do NOT escape characters or return extra backslashes.\n", + " \"\"\"\n", + "\n", + " if USE_OPENAI:\n", + " response_stream = openai_client.chat.completions.create(\n", + " model=self.model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in response_stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + " else:\n", + " response_stream = ollama.chat(\n", + " model=self.model,\n", + " messages=[\n", + " {\"role\": 
\"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " full_text = \"\"\n", + " for chunk in response_stream:\n", + " if \"message\" in chunk:\n", + " content = chunk[\"message\"][\"content\"] or \"\"\n", + " full_text += content\n", + " update_display(Markdown(full_text), display_id=display_handle.display_id)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1c69651f-e004-421e-acc5-c439e57a8762", + "metadata": {}, + "outputs": [], + "source": [ + "class BrochureGenerator:\n", + " \"\"\"\n", + " Main class to generate a company brochure.\n", + " \"\"\"\n", + " def __init__(self, company_name, url, language='English'):\n", + " self.company_name = company_name\n", + " self.url = url\n", + " self.language = language\n", + " self.website = Website(url)\n", + " self.llm_client = LLMClient()\n", + "\n", + " def generate(self):\n", + " links = self.llm_client.get_relevant_links(self.website)\n", + " content = self.website.get_contents()\n", + "\n", + " for link in links['links']:\n", + " linked_website = Website(link['url'])\n", + " content += f\"\\n\\n{link['type']}:\\n\"\n", + " content += linked_website.get_contents()\n", + "\n", + " self.llm_client.generate_brochure(self.company_name, content, self.language)\n" + ] + }, + { + "cell_type": "markdown", + "id": "1379d39d", + "metadata": {}, + "source": [ + "## 📝 Generate Brochure" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a63519a-1981-477b-9de1-f1ff9be94201", + "metadata": {}, + "outputs": [], + "source": [ + "def main():\n", + " company_name = \"Tour Eiffel\"\n", + " url = \"https://www.toureiffel.paris/fr\"\n", + " language = \"French\"\n", + "\n", + " generator = BrochureGenerator(company_name, url, language)\n", + " generator.generate()\n", + "\n", + "if __name__ == \"__main__\":\n", + " main()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/03_tech_explainer.ipynb b/week1/community-contributions/03_tech_explainer.ipynb new file mode 100644 index 0000000..7e8f2f9 --- /dev/null +++ b/week1/community-contributions/03_tech_explainer.ipynb @@ -0,0 +1,142 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "6e907206-4c13-4698-91c6-9ca1c32be8e7", + "metadata": {}, + "source": [ + "# TechExplainAI\n", + "---\n", + "\n", + "AI-driven tool that provides concise, structured explanations for technical questions and code snippets.\n", + "\n", + "- 🌍 Task: AI-powered technical explanation generator\n", + "- 🧠 Model: OpenAI's `GPT-4o-mini`, Ollama's `llama3.2:3b`\n", + "- 📌 Output Format: Markdown with real-time streaming\n", + "- 🧑‍💻 Skill Level: Beginner\n", + "- 🔄 Interaction Mode: User enters a technical question → AI generates a structured, concise explanation\n", + "- 🎯 Purpose: Quickly explain technical concepts and Python code snippets\n", + "- 🔧 Customization: Users can modify the models, prompts, and formatting as needed\n", + "\n", + "🛠️ Requirements\n", + "- ⚙️ Hardware: ✅ CPU is sufficient — no 
GPU required\n", + "- 🔑 OpenAI API Key\n", + "- Install Ollama and pull llama3.2:3b or another lightweight model\n", + "\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f743c87a-ed80-43d5-84ad-c78c8bdacb09", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import openai\n", + "import ollama\n", + "from dotenv import load_dotenv\n", + "from IPython.display import display, Markdown, update_display\n", + "\n", + "# Load environment variables\n", + "load_dotenv(override=True)\n", + "\n", + "# Set up OpenAI API key\n", + "OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\n", + "if not OPENAI_API_KEY:\n", + " raise ValueError(\"Please set your OpenAI API key in environment variables.\")\n", + "\n", + "# Constants\n", + "MODEL_GPT = \"gpt-4o-mini\"\n", + "MODEL_LLAMA = \"llama3.2:3b\"\n", + "\n", + "# Prompt user for question (until input is provided)\n", + "while True:\n", + " question = input(\"Hello, I am your personal technical tutor. Enter your question: \").strip()\n", + " if question:\n", + " break # Proceed only if a valid question is entered\n", + " print(\"Question cannot be empty. Please enter a question.\")\n", + "\n", + "# Common user prompt\n", + "user_prompt = f\"\"\"\n", + "Please give a detailed explanation to the following question: {question}.\n", + "Be less verbose.\n", + "Provide a clear and concise explanation without unnecessary elaboration.\n", + "\"\"\"\n", + "\n", + "# Common system prompt\n", + "system_prompt = \"\"\"\n", + "You are a helpful AI assistant that explains Python code in a clear and concise manner. Provide structured explanations and examples when necessary.\n", + "Be less verbose.\n", + "\"\"\"\n", + "\n", + "def ask_openai():\n", + " \"\"\"Gets response from OpenAI's GPT model with streaming.\"\"\"\n", + " print(\"\\n\\n\\n🚀🤖🚀 Response from OpenAI GPT-4o-mini 🚀🤖🚀\")\n", + " client = openai.OpenAI(api_key=OPENAI_API_KEY)\n", + " response_stream = client.chat.completions.create(\n", + " model=MODEL_GPT,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in response_stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + "\n", + "def ask_ollama():\n", + " \"\"\"Gets response from Ollama's Llama 3.2 model with streaming.\"\"\"\n", + " print(\"\\n\\n\\n🔥✨🔥 Response from Llama 3.2 🔥✨🔥\\n\")\n", + " response = ollama.chat(\n", + " model=MODEL_LLAMA,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + "\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " full_text = \"\"\n", + " for chunk in response:\n", + " if \"message\" in chunk:\n", + " content = chunk[\"message\"][\"content\"] or \"\"\n", + " full_text += content\n", + " update_display(Markdown(full_text), display_id=display_handle.display_id)\n", + "\n", + "# Call the functions\n", + "ask_openai()\n", + "ask_ollama()\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", 
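The brochure generator and this tech explainer repeat the same two streaming loops: one reading `chunk.choices[0].delta.content` from OpenAI and one reading `chunk["message"]["content"]` from Ollama. A small helper can normalise both into plain text deltas. This is only a sketch, assuming the dict-style Ollama chunks used in these cells; the helper name is made up.

```python
from typing import Iterable, Iterator

def iter_text_deltas(chunks: Iterable, provider: str) -> Iterator[str]:
    """Yield plain-text deltas from an OpenAI or Ollama streaming response."""
    for chunk in chunks:
        if provider == "openai":
            yield chunk.choices[0].delta.content or ""
        elif provider == "ollama":
            yield chunk.get("message", {}).get("content", "") or ""
        else:
            raise ValueError(f"Unknown provider: {provider}")

# Usage sketch inside either streaming function:
# for delta in iter_text_deltas(response_stream, "openai"):
#     response += delta
#     update_display(Markdown(response), display_id=display_handle.display_id)
```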
+ "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 2d511eb3c62f3b2b985b358b91540006848636b0 Mon Sep 17 00:00:00 2001 From: lisekarimi Date: Thu, 5 Jun 2025 16:40:08 +0200 Subject: [PATCH 21/23] Add week4 contributions --- .../07_data_generator.ipynb | 569 ++++++++++++++++++ 1 file changed, 569 insertions(+) create mode 100644 week4/community-contributions/07_data_generator.ipynb diff --git a/week4/community-contributions/07_data_generator.ipynb b/week4/community-contributions/07_data_generator.ipynb new file mode 100644 index 0000000..6de3bcf --- /dev/null +++ b/week4/community-contributions/07_data_generator.ipynb @@ -0,0 +1,569 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "BSbc4VbLi2Ek" + }, + "source": [ + "# Synthetic Dataset generator\n", + "- 🚀 Live Demo: https://huggingface.co/spaces/lisekarimi/datagen\n", + "- 🧑‍💻 Repo: https://github.com/lisekarimi/datagen\n", + "\n", + "---\n", + "\n", + "- 🌍 **Task**: Generate realistic synthetic datasets\n", + "- 🎯 **Supported Data Types**: Tabular, Text, Time-series\n", + "- 🧠 **Models**: GPT (OpenAI) , Claude (Anthropic), CodeQwen1.5-7B-Chat (via Hugging Face Inference) / Llama (in Google Colab through T4 GPU)\n", + "- 🚀 **Tools**: Python, Gradio UI, OpenAI / Anthropic / HuggingFace APIs\n", + "- 📤 **Output Formats**: JSON and CSV file\n", + "- 🧑‍💻 **Skill Level**: Intermediate\n", + "\n", + "🎯 **How It Works**\n", + "\n", + "1️⃣ Define your business problem or dataset topic.\n", + "\n", + "2️⃣ Choose the dataset type, output format, model, and number of samples.\n", + "\n", + "3️⃣ The LLM generates the code; you can adjust or modify it as needed.\n", + "\n", + "4️⃣ Execute the code to generate your output file.\n", + "\n", + "🛠️ **Requirements** \n", + "- ⚙️ **Hardware**: ✅ GPU required (model download); Google Colab recommended (T4)\n", + "- 🔑 OpenAI API Key (for GPT) \n", + "- 🔑 Anthropic API Key (for Claude) \n", + "- 🔑 Hugging Face Token \n", + "\n", + "**Deploy CodeQwen Endpoint:**\n", + "- Visit https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat\n", + "- Click **Deploy** → **Inference Endpoints** → **Create Endpoint** (requires credit card)\n", + "- Copy your endpoint URL: `https://[id].us-east-1.aws.endpoints.huggingface.cloud`\n", + "\n", + "⚙️ **Customizable by user** \n", + "- 🤖 Selected model: GPT / Claude / Llama / Code Qwen\n", + "- 📜 `system_prompt`: Controls model behavior (concise, accurate, structured) \n", + "- 💬 `user_prompt`: Dynamic — include other fields\n", + "\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9E-Ioggxi2Em" + }, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "pR-ftUatjEGd", + "outputId": "ae5668c5-c369-4066-bbbf-b560fb28e39a" + }, + "outputs": [], + "source": [ + "# Install required packages in Google Colab\n", + "%pip install -q python-dotenv gradio anthropic openai requests torch bitsandbytes transformers sentencepiece accelerate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VPmk2-Ggi2Em" + }, + "outputs": [], + 
"source": [ + "import re\n", + "import sys\n", + "import subprocess\n", + "import threading\n", + "import anthropic\n", + "import torch\n", + "import gradio as gr\n", + "from openai import OpenAI\n", + "from huggingface_hub import InferenceClient, login\n", + "from google.colab import userdata\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, BitsAndBytesConfig" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DUQ55_oji2En" + }, + "source": [ + "## Initialization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MiicxGawi2En" + }, + "outputs": [], + "source": [ + "# Google Colab User Data\n", + "# Ensure you have set the following in your Google Colab environment:\n", + "openai_api_key = userdata.get(\"OPENAI_API_KEY\")\n", + "anthropic_api_key = userdata.get(\"ANTHROPIC_API_KEY\")\n", + "hf_token = userdata.get('HF_TOKEN')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "OPENAI_MODEL = \"gpt-4o-mini\"\n", + "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", + "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"\n", + "\n", + "code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n", + "CODE_QWEN_URL = \"https://zfkokxzs1xrqv13v.us-east-1.aws.endpoints.huggingface.cloud\"\n", + "\n", + "login(hf_token, add_to_git_credential=True)\n", + "openai = OpenAI(api_key=openai_api_key)\n", + "claude = anthropic.Anthropic(api_key=anthropic_api_key)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ipA1F440i2En" + }, + "source": [ + "## Prompts definition" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JgtqCyRji2En" + }, + "outputs": [], + "source": [ + "system_message = \"\"\"\n", + "You are a helpful assistant whose main purpose is to generate datasets for business problems.\n", + "\n", + "Be less verbose.\n", + "Be accurate and concise.\n", + "\n", + "The user will describe a business problem. Based on this, you must generate a synthetic dataset that fits the context.\n", + "\n", + "The dataset should be saved in a specific format such as CSV, JSON — the desired format will be specified by the user.\n", + "\n", + "The dependencies for python code should include only standard python libraries such as numpy, pandas and built-in libraries.\n", + "\n", + "When saving a DataFrame to JSON using `to_json()`, do not use the `encoding` parameter. Instead, manually open the file with `open()` and specify the encoding. 
Then pass the file object to `to_json()`.\n", + "\n", + "Ensure Python code blocks are correctly indented, especially inside `with`, `for`, `if`, `try`, and `def` blocks.\n", + "\n", + "Return only the Python code that generates and saves the dataset.\n", + "After saving the file, print the code that was executed and a message confirming the dataset was generated successfully.\n", + "\"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Bk6saP4oi2Eo" + }, + "outputs": [], + "source": [ + "def user_prompt(**input_data):\n", + " user_prompt = f\"\"\"\n", + " Generate a synthetic {input_data[\"dataset_type\"].lower()} dataset in {input_data[\"output_format\"].upper()} format.\n", + " Business problem: {input_data[\"business_problem\"]}\n", + " Samples: {input_data[\"num_samples\"]}\n", + " \"\"\"\n", + " return user_prompt\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XnrPiAZ7i2Eo" + }, + "source": [ + "## Call API for Closed Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Sx7hHKczi2Eo" + }, + "outputs": [], + "source": [ + "def stream_gpt(user_prompt):\n", + " stream = openai.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\",\"content\": user_prompt},\n", + " ],\n", + " stream=True,\n", + " )\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or \"\"\n", + " yield response\n", + "\n", + " return response\n", + "\n", + "\n", + "def stream_claude(user_prompt):\n", + " result = claude.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=2000,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\",\"content\": user_prompt}\n", + " ]\n", + " )\n", + " reply = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " yield reply\n", + " print(text, end=\"\", flush=True)\n", + " return reply\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PUPeZ4xPi2Eo" + }, + "source": [ + "## Call Open Source Models\n", + "- Llama is downloaded and run on T4 GPU (Google Colab).\n", + "- Code Qwen is run through inference endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "W0AuZT2uk0Sd" + }, + "outputs": [], + "source": [ + "def stream_llama(user_prompt):\n", + " try:\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\",\"content\": user_prompt},\n", + " ]\n", + "\n", + " tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + "\n", + " quant_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + " )\n", + "\n", + " model = AutoModelForCausalLM.from_pretrained(\n", + " LLAMA,\n", + " device_map=\"auto\",\n", + " quantization_config=quant_config\n", + " )\n", + "\n", + " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", + " streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=False)\n", + "\n", + " thread = threading.Thread(target=model.generate, kwargs={\n", + " \"input_ids\": inputs,\n", + " \"max_new_tokens\": 1000,\n", + " \"pad_token_id\": tokenizer.eos_token_id,\n", + " \"streamer\": 
streamer\n", + " })\n", + " thread.start()\n", + "\n", + " started = False\n", + " reply = \"\"\n", + "\n", + " for new_text in streamer:\n", + " if not started:\n", + " if \"<|start_header_id|>assistant<|end_header_id|>\" in new_text:\n", + " started = True\n", + " new_text = new_text.split(\"<|start_header_id|>assistant<|end_header_id|>\")[-1].strip()\n", + " else:\n", + " continue\n", + "\n", + " if \"<|eot_id|>\" in new_text:\n", + " new_text = new_text.replace(\"<|eot_id|>\", \"\")\n", + " if new_text.strip():\n", + " reply += new_text\n", + " yield reply\n", + " break\n", + "\n", + " if new_text.strip():\n", + " reply += new_text\n", + " yield reply\n", + "\n", + " return reply\n", + "\n", + " except Exception as e:\n", + " print(f\"LLaMA error: {e}\")\n", + " raise\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "V0JS_6THi2Eo" + }, + "outputs": [], + "source": [ + "def stream_code_qwen(user_prompt):\n", + " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\",\"content\": user_prompt},\n", + " ]\n", + " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", + " client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n", + " stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", + " result = \"\"\n", + " for r in stream:\n", + " result += r.token.text\n", + " yield result" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PqG57dJIi2Eo" + }, + "source": [ + "## Select the model and generate the ouput" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YqSKnklRi2Eo" + }, + "outputs": [], + "source": [ + "def generate_from_inputs(model, **input_data):\n", + " # print(\"🔍 input_data received:\", input_data)\n", + " user_prompt_str = user_prompt(**input_data)\n", + "\n", + " if model == \"GPT\":\n", + " result = stream_gpt(user_prompt_str)\n", + " elif model == \"Claude\":\n", + " result = stream_claude(user_prompt_str)\n", + " elif model == \"Llama\":\n", + " result = stream_llama(user_prompt_str)\n", + " elif model == \"Code Qwen\":\n", + " result = stream_code_qwen(user_prompt_str)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + "\n", + " for stream_so_far in result:\n", + " yield stream_so_far\n", + "\n", + " return result\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zG6_TSfni2Eo" + }, + "outputs": [], + "source": [ + "def handle_generate(business_problem, dataset_type, dataset_format, num_samples, model):\n", + " input_data = {\n", + " \"business_problem\": business_problem,\n", + " \"dataset_type\": dataset_type,\n", + " \"output_format\": dataset_format,\n", + " \"num_samples\": num_samples,\n", + " }\n", + "\n", + " response = generate_from_inputs(model, **input_data)\n", + " for chunk in response:\n", + " yield chunk\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "p5DQcx71i2Ep" + }, + "source": [ + "## Extract python code from the LLM output and execute it locally" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NcEkmsnai2Ep", + "jp-MarkdownHeadingCollapsed": true + }, + "outputs": [], + "source": [ + "def extract_code(text):\n", + " match = re.search(r\"```python(.*?)```\", text, re.DOTALL)\n", + "\n", + " if match:\n", + " code = match.group(0).strip()\n", + " else:\n", + " code = 
\"\"\n", + " print(\"No matching substring found.\")\n", + "\n", + " return code.replace(\"```python\\n\", \"\").replace(\"```\", \"\")\n", + "\n", + "\n", + "def execute_code_in_virtualenv(text, python_interpreter=sys.executable):\n", + " if not python_interpreter:\n", + " raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n", + "\n", + " code_str = extract_code(text)\n", + " command = [python_interpreter, '-c', code_str]\n", + "\n", + " try:\n", + " result = subprocess.run(command, check=True, capture_output=True, text=True)\n", + " stdout = result.stdout\n", + " return stdout\n", + "\n", + " except subprocess.CalledProcessError as e:\n", + " return f\"Execution error:\\n{e}\"\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DQgEyFzJi2Ep" + }, + "source": [ + "## Gradio interface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SEiZVkdFi2Ep" + }, + "outputs": [], + "source": [ + "def update_output_format(dataset_type):\n", + " if dataset_type in [\"Tabular\", \"Time-series\"]:\n", + " return gr.update(choices=[\"JSON\", \"csv\"], value=\"JSON\")\n", + " elif dataset_type == \"Text\":\n", + " return gr.update(choices=[\"JSON\"], value=\"JSON\")\n", + "\n", + "with gr.Blocks() as ui:\n", + " gr.Markdown(\"## Create a dataset for a business problem\")\n", + "\n", + " with gr.Column():\n", + " business_problem = gr.Textbox(label=\"Business problem\", lines=2)\n", + " dataset_type = gr.Dropdown(\n", + " [\"Tabular\", \"Time-series\", \"Text\"], label=\"Dataset type\"\n", + " )\n", + "\n", + " output_format = gr.Dropdown( choices=[\"JSON\", \"csv\"], value=\"JSON\",label=\"Output Format\")\n", + "\n", + " num_samples = gr.Number(label=\"Number of samples\", value=10, precision=0)\n", + "\n", + " model = gr.Dropdown([\"GPT\", \"Claude\", \"Llama\", \"Code Qwen\"], label=\"Select model\", value=\"GPT\")\n", + "\n", + " dataset_type.change(update_output_format,inputs=[dataset_type], outputs=[output_format])\n", + "\n", + " with gr.Row():\n", + " with gr.Column():\n", + " dataset_run = gr.Button(\"Create a dataset\")\n", + " gr.Markdown(\"\"\"⚠️ For Llama and Code Qwen: The generated code might not be optimal. 
It's recommended to review it before execution.\n", + " Some mistakes may occur.\"\"\")\n", + "\n", + " with gr.Column():\n", + " code_run = gr.Button(\"Execute code for a dataset\")\n", + " gr.Markdown(\"\"\"⚠️ Be cautious when sharing this app with code execution publicly, as it could pose safety risks.\n", + " The execution of user-generated code may lead to potential vulnerabilities, and it’s important to use this tool responsibly.\"\"\")\n", + "\n", + " with gr.Row():\n", + " dataset_out = gr.Textbox(label=\"Generated Dataset\")\n", + " code_out = gr.Textbox(label=\"Executed code\")\n", + "\n", + " dataset_run.click(\n", + " handle_generate,\n", + " inputs=[business_problem, dataset_type, output_format, num_samples, model],\n", + " outputs=[dataset_out]\n", + " )\n", + "\n", + " code_run.click(\n", + " execute_code_in_virtualenv,\n", + " inputs=[dataset_out],\n", + " outputs=[code_out]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 646 + }, + "id": "jCAkTEtMi2Ep", + "outputId": "deeeb1a7-c432-4007-eba2-cbcc28dbc0ff" + }, + "outputs": [], + "source": [ + "ui.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 01b33b4edeb640666685169dda9a3ea7e3aacc20 Mon Sep 17 00:00:00 2001 From: lisekarimi Date: Thu, 5 Jun 2025 17:19:54 +0200 Subject: [PATCH 22/23] Add week2 contributions --- .../04_tribot_debate.ipynb | 429 ++++++++++++++ .../05_weathermate_ai_agent.ipynb | 557 ++++++++++++++++++ 2 files changed, 986 insertions(+) create mode 100644 week2/community-contributions/04_tribot_debate.ipynb create mode 100644 week2/community-contributions/05_weathermate_ai_agent.ipynb diff --git a/week2/community-contributions/04_tribot_debate.ipynb b/week2/community-contributions/04_tribot_debate.ipynb new file mode 100644 index 0000000..3fddadf --- /dev/null +++ b/week2/community-contributions/04_tribot_debate.ipynb @@ -0,0 +1,429 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "559ec769-087c-4c38-a6e4-4732f4ffb261", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "# TriBot Debate\n", + "---\n", + "\n", + "This notebook sets up a **three-bot chat system** where GPT (polite & humorous) 🎭, Claude (argumentative & snarky) 🔥, and DeepSeek (logical & analytical) 💡 engage in conversations with distinct personalities.\n", + "\n", + "- 🧑‍💻 **Skill Level:** Advanced \n", + "- 🎯 **Purpose:** Simulate diverse conversational styles for debate, analysis, and entertainment\n", + "\n", + "🛠️ Requirements\n", + "- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n", + "- 🔑 OpenAI API Key\n", + "- 🔑 Anthropic API Key (Claude)\n", + "- 🔑 Deepseek API Key\n", + " \n", + "🔧 Customizable by user\n", + "- Selected model: GPT / Claude / Deepseek\n", + "- System_prompt\n", + "- Starter sentences for each bot\n", + "- `max_turns` to control the number of responses in the conversation\n", + "\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub 
repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "markdown", + "id": "fe78fae0", + "metadata": {}, + "source": [ + "## 📘 Class Diagram\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/04_3bot_class_diagram.png?raw=true)" + ] + }, + { + "cell_type": "markdown", + "id": "62a2f5ca-7d89-4ba7-b342-277452beb2f5", + "metadata": {}, + "source": [ + "## 📚 Imports & Keys" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce67806a-3e3b-426d-b442-c3bca2e3dda2", + "metadata": {}, + "outputs": [], + "source": [ + "from dotenv import load_dotenv\n", + "import os\n", + "import random\n", + "import anthropic\n", + "from openai import OpenAI\n", + "from IPython.display import display, Markdown, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd2613d2-b675-4633-aedf-37ea2a1f0234", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables from .env file\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(\"✅ OpenAI API Key is set.\")\n", + "else:\n", + " print(\"❌ OpenAI API Key not set.\")\n", + "\n", + "if anthropic_api_key:\n", + " print(\"✅ Anthropic API Key is set.\")\n", + "else:\n", + " print(\"❌ Anthropic API Key not set.\")\n", + "\n", + "if deepseek_api_key:\n", + " print(\"✅ Deepseek API Key is set.\")\n", + "else:\n", + " print(\"❌ Deepseek API Key not set.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb8d5d01-04b9-44e8-a713-da36e6dd9be1", + "metadata": {}, + "outputs": [], + "source": [ + "# Establishe connection with the chatbot APIs\n", + "\n", + "# OpenAI API Client\n", + "openai = OpenAI()\n", + "\n", + "# Anthropic API Client\n", + "claude = anthropic.Anthropic()\n", + "\n", + "# DeepSeek using OpenAI-compatible API\n", + "deepseek_client = OpenAI(\n", + " api_key=deepseek_api_key,\n", + " base_url=\"https://api.deepseek.com\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6881d2a1-3a5d-4d0d-a437-aae7fc2542de", + "metadata": {}, + "source": [ + "## 📋 Constants & Settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7c009278-ad4a-4bdf-8aa4-d0882155710d", + "metadata": {}, + "outputs": [], + "source": [ + "# We're using cheap versions of models so the costs will be minimal\n", + "GPT_MODEL = \"gpt-4o-mini\"\n", + "CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n", + "DEEPSEEK_MODEL = \"deepseek-chat\"\n", + "\n", + "MAX_TURNS = 6 # Dynamic, can be adjusted by the user\n", + "\n", + "# System Prompts\n", + "GPT_SYSTEM = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other person says, or find common ground. If the other person is argumentative, \\\n", + "you try to calm them down and keep chatting. Avoid questions like 'How can I assist you?' or 'How can I help you?' \\\n", + "and dive directly into the conversation. Be less verbose, don't talk too much. \\\n", + "Go straight to the point, don't beat around the bush. Keep the conversation light, fun, and engaging with a touch of humor. 
\\\n", + "Throw in witty remarks, playful jokes, and entertaining responses when appropriate to keep things lively.\"\n", + "\n", + "CLAUDE_SYSTEM = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way. \\\n", + "Avoid questions like 'How can I assist you?' or 'How can I help you?' \\\n", + "and dive directly into the conversation. Be less verbose, don't talk too much. \\\n", + "Go straight to the point, don't beat around the bush.\"\n", + "\n", + "DEEPSEEK_SYSTEM = \"You are a highly logical and analytical chatbot. You break down \\\n", + "arguments with precise reasoning, focusing on facts and logic over emotions. You stay neutral \\\n", + "and detached, always pointing out inconsistencies or flaws in reasoning. \\\n", + "Avoid questions like 'How can I assist you?' or 'How can I help you?' \\\n", + "and dive directly into the conversation. Be less verbose, don't talk too much. \\\n", + "Go straight to the point, don't beat around the bush.\"\n", + "\n", + "# Define emojis for each bot\n", + "BOT_EMOJIS = {\n", + " \"GPT\": \"🎭\",\n", + " \"Claude\": \"🔥\",\n", + " \"Deepseek\": \"💡\"\n", + "}\n", + "\n", + "# Starter Messages\n", + "STARTER_GPT = \"Hey there! Let’s chat—serious debates, silly topics, or why cats rule the world. Your call!\"\n", + "STARTER_CLAUDE = \"Hello. Got an argument? Fine. Try me, but be ready—I won’t just agree.\"\n", + "STARTER_DEEPSEEK = \"Hi! Let’s dive into a focused discussion. What topic do you want to analyze?\"\n" + ] + }, + { + "cell_type": "markdown", + "id": "7a6c05cc-8bae-4d66-8378-1629092e5d15", + "metadata": {}, + "source": [ + "## 🤖 Bot Classes & Logic" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b9e0ac1-2569-4bdb-b392-8beb767646cb", + "metadata": {}, + "outputs": [], + "source": [ + "class Chatbot:\n", + " def __init__(self, name, model, system_prompt, starter_message):\n", + " self.name = name\n", + " self.model = model\n", + " self.system_prompt = system_prompt\n", + " self.starter_message = starter_message\n", + "\n", + " def reply(self, message_history):\n", + " \"\"\"Override this method in subclasses for specific chatbot behaviors.\"\"\"\n", + " raise NotImplementedError(\"Subclasses must implement this method.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9b883870-6311-4ba3-87e5-3a2bca46487a", + "metadata": {}, + "outputs": [], + "source": [ + "class GPTBot(Chatbot):\n", + " def reply(self, message_history):\n", + " \"\"\"Calls OpenAI GPT API and returns a response.\"\"\"\n", + " try:\n", + " # Explicitly include the system prompt in the messages list\n", + " messages = [{\"role\": \"system\", \"content\": self.system_prompt}] + [\n", + " {\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in message_history\n", + " ]\n", + " response = openai.chat.completions.create(\n", + " model=self.model,\n", + " messages=messages, # Use the explicitly formatted messages\n", + " temperature=0.4,\n", + " max_tokens=200,\n", + " stream=True\n", + " )\n", + " return response\n", + " except Exception as e:\n", + " return f\"Error in GPT response: {e}\"\n", + "\n", + "\n", + "class ClaudeBot(Chatbot):\n", + " def reply(self, message_history):\n", + " \"\"\"Calls Anthropic Claude API and returns a response.\"\"\"\n", + " try:\n", + " # Extract user/assistant messages\n", + " user_messages = [\n", + " {\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in message_history\n", + " 
]\n", + " # Call Claude API with system prompt and user messages\n", + " response = claude.messages.stream(\n", + " model=self.model,\n", + " max_tokens=1000,\n", + " system=self.system_prompt, # Pass the system prompt\n", + " messages=user_messages # Pass the conversation history\n", + " )\n", + " return response\n", + " except Exception as e:\n", + " return f\"Error in Claude response: {e}\"\n", + "\n", + "\n", + "class DeepseekBot(Chatbot):\n", + " def reply(self, message_history):\n", + " \"\"\"Calls DeepSeek API using OpenAI-compatible client.\"\"\"\n", + " try:\n", + " # Explicitly include the system prompt in the messages list\n", + " messages = [{\"role\": \"system\", \"content\": self.system_prompt}] + [\n", + " {\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in message_history\n", + " ]\n", + " response = deepseek_client.chat.completions.create(\n", + " model=self.model,\n", + " messages=messages, # Use the explicitly formatted messages\n", + " max_tokens=200,\n", + " stream=True\n", + " )\n", + " return response\n", + " except Exception as e:\n", + " return f\"Error in DeepSeek response: {e}\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c07c8814-87af-4bb4-8a90-9d146c228d85", + "metadata": {}, + "outputs": [], + "source": [ + "class ChatManager:\n", + " def __init__(self, bots, max_turns=MAX_TURNS):\n", + " self.bots = bots # List of chatbot instances\n", + " self.max_turns = max_turns\n", + " self.message_history = []\n", + " self.current_bot = random.choice(self.bots) # Random starting bot\n", + "\n", + " def conversation(self):\n", + " \"\"\"Manages the chat loop up to max_turns.\"\"\"\n", + "\n", + " # Stream the first message as \"user\" role\n", + " emoji = BOT_EMOJIS.get(self.current_bot.name, \"🤖\") # Default emoji if not found\n", + " response = f\"{emoji} **{self.current_bot.name}:** \\n\"\n", + " display_handle = display(Markdown(response), display_id=True)\n", + "\n", + " for char in self.current_bot.starter_message:\n", + " update_display(Markdown(response + char), display_id=display_handle.display_id)\n", + " response += char\n", + "\n", + " # Store first message as \"user\" role\n", + " self.message_history.append({\"role\": \"assistant\", \"content\": self.current_bot.starter_message})\n", + "\n", + " print(\"\\n--------------\\n\") # Fancy separator\n", + "\n", + " for _ in range(self.max_turns - 1): # Already sent 1 message\n", + " self.current_bot = self._choose_next_bot()\n", + "\n", + " # Alternate roles while ensuring last role is always \"user\"\n", + " for i in range(len(self.message_history)):\n", + " self.message_history[i][\"role\"] = \"user\" if i % 2 == 0 else \"assistant\"\n", + "\n", + " # Ensure the last role is \"user\" before sending to the bot\n", + " if self.message_history[-1][\"role\"] != \"user\":\n", + " self.message_history[-1][\"role\"] = \"user\"\n", + "\n", + " # Pass only the message history to the bot and Get bot's response\n", + " response_stream = self.current_bot.reply(self.message_history)\n", + "\n", + " # Get the correct emoji for the bot\n", + " emoji = BOT_EMOJIS.get(self.current_bot.name, \"🤖\")\n", + "\n", + " # Display bot name separately before streaming starts\n", + " bot_header = f\"{emoji} **{self.current_bot.name}:** \\n\"\n", + " display_handle = display(Markdown(bot_header), display_id=True)\n", + "\n", + " # **Initialize response content separately (exclude bot name)**\n", + " response_content = \"\"\n", + "\n", + " if isinstance(self.current_bot, GPTBot) or 
isinstance(self.current_bot, DeepseekBot):\n", + " # Handle OpenAI GPT & DeepSeek\n", + " for chunk in response_stream:\n", + " new_text = chunk.choices[0].delta.content or '' # Get new streamed text\n", + " response_content += new_text # Append new content\n", + "\n", + " # Clean Markdown artifacts\n", + " response_content = response_content.replace(\"```\", \"\").replace(\"markdown\", \"\")\n", + "\n", + " # Update the content, without duplicating the bot name\n", + " update_display(Markdown(bot_header + response_content), display_id=display_handle.display_id)\n", + "\n", + " elif isinstance(self.current_bot, ClaudeBot):\n", + " # Handle Claude differently\n", + " with response_stream as stream:\n", + " for text in stream.text_stream:\n", + " response_content += text or '' # Append new streamed text\n", + " # Clean Markdown artifacts\n", + " response_content = response_content.replace(\"```\", \"\").replace(\"markdown\", \"\")\n", + "\n", + " update_display(Markdown(bot_header + response_content), display_id=display_handle.display_id)\n", + "\n", + " print(\"\\n--------------\\n\") # Fancy separator\n", + "\n", + " # Store bot response\n", + " self.message_history.append({\"role\": \"assistant\", \"content\": response_content})\n", + "\n", + "\n", + " def _choose_next_bot(self):\n", + " \"\"\"Selects the next bot dynamically (avoiding immediate self-replies).\"\"\"\n", + " available_bots = [bot for bot in self.bots if bot != self.current_bot]\n", + " return random.choice(available_bots)" + ] + }, + { + "cell_type": "markdown", + "id": "5d8fe072", + "metadata": {}, + "source": [ + "## 🗨️ Chat Engine" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca192335-31e0-4849-878a-da29069f90be", + "metadata": {}, + "outputs": [], + "source": [ + "def main():\n", + " # Initialize chatbot instances\n", + " gpt_bot = GPTBot(\"GPT\", GPT_MODEL, GPT_SYSTEM, STARTER_GPT)\n", + " claude_bot = ClaudeBot(\"Claude\", CLAUDE_MODEL, CLAUDE_SYSTEM, STARTER_CLAUDE)\n", + " deepseek_bot = DeepseekBot(\"Deepseek\", DEEPSEEK_MODEL, DEEPSEEK_SYSTEM, STARTER_DEEPSEEK)\n", + "\n", + " # Create chat manager with all bots\n", + " chat_manager = ChatManager([gpt_bot, claude_bot, deepseek_bot], max_turns=MAX_TURNS)\n", + " # chat_manager = ChatManager([gpt_bot, claude_bot], max_turns=MAX_TURNS)\n", + "\n", + " # Start the conversation\n", + " chat_manager.conversation()\n", + "\n", + "# Ensures the script runs only when executed directly\n", + "if __name__ == \"__main__\":\n", + " main()" + ] + }, + { + "attachments": { + "image.png": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABIgAAAO3CAYAAABIp381AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAP+lSURBVHhe7P17eFX1nff/v6JmgySwgUACMaGAyCFJYzCGk60lWiJSsWJvGdRSpbf9ZTqIldKEjqXaKu0tcPPFCtwzmXoXHasyOiPexaEI1igWBdNgoEk4lEMKMZDEABuy0SRKfn+w13LtlbVPyQ4H9/NxXVwXWcfP+pzWZ73XYccNH5XdLgAAAAAAAMSsy+wTAAAAAAAAEFsIEAEAAAAAAMQ4AkQAAAAAAAAxjgARAAAAAABAjIuL5CPVZ3smqOGOf9Zln3rV60CZrqz5UGd7JOiTYdfpk+HX6XLvSQ347xVSe9ibBAAAAAAAwAUWUYDo0/Qs1X1/pX2yn2G/ukVxn7XaJwMAAAAAAOAiFVGAqD3+Sh16ZIN9sin+eK3SV862Tw7b3PnFGjr8ar9pNQcPaPWKpXK73Sp8aIEGJqf4zW9sqFfJ08t13fiJmjZ9ht88q/IPtmnt82vskwEAAAAAAGJeRN8gimv7RFd46nV5c5P6lf5OPer2KPGvb6nv1peks2flqj9oXyUsqWnp+uWTyzsEh8IxMDlFhQ8tUM+ePe2zAAAAAAAAEIaIniCSpGP3PKlPhuaoPb6H4tpaJEntl8crrv2s+r77gvq986x9laCsTwa1tbbp5RefU0V5meQLHH1z6rf078/8q99yG9avU+mmjcovmKpp02d0WG/W7DnKHTfBfPoIAAAAAAAAgUX0BJEkuRoOyr3tFcW1fqL2+B5qj++hK043yV3+B31rwAFdm96uPldKN4+RHvu2tPHHUu7QwDGo68ZPdAwOSVJd7RH9+zP/6re81d7qKp3xNiveFa9+SUn22R3kF0zVspUlKv7543K73fbZAAAAAAAAMSniAFGPj/bos76p+sry/6GBrz2pQS/+s9JX3yfXqK/qV3l79eqDcfrwF9K/3S99b5J0TYqUnRZn34wpJWWwJOnkyeM6tH+f+brZspUlWrayRL9evko5uXn21SRJozIy1SshUW2tbTrR1GSfDQAAAAAAgDBEHCDq9bdt+nRIpo7du0Sf9R2sT7+SrboH1yinT6OGxDXYF5ck/a3ePqVrpk2foWUrS8yPUn9Ue9jvyaNASjdtVNG8Qi194lF5PB77bAAAAAAAgJgUcYAo7rNWpa75kdR+Vicm36eTE/9BY878Va/2WqzVb7Xr8T+06y+HpCc3SKW7z62z56h9K1+orz83s2/f/ho2YqTqao/osZ8u0Ioli3XG22xfvIPyD7bxnSEAAAAAAIAuiDhAJElXeOqV+uyPNPyX+Rr+xM0a+/avlBD3ib7/9TiNGxan9w9I/zi5XRNGSCe80sdB4jzW7wjNuGuWUtPS7Yt0sGH9OhXNK1TRvEJ+uh4AAAAAAKCLOhUgstvne4Xsf2+UHnqxXa992K6v/6847T0q7T1mX9pfXe0Rvf3WZklSr4REzV+4SMtWlmj+wkXqlZBoX7xL+Eg1AAAAAABAR1EJEB09KR1qlPYcbdd7P5P+VBSnf/leu55/r107jwT+BTND6aaNAV8p2/zG62F9XwgAAAAAAACdEzd8VHboCE4YCrLa9eT/kNxXfvGLZX+tbdcPno1T42m/RQEAAAAAAHARubzfgJRf2Cd2xoGGOP21Nk6D3VLvHu36Q0WcFv6n1NQc+CfuAQAAAAAAcOFF7QkiAAAAAAAAXJqi8g0iAAAAAAAAXLoIEAEAAAAAAMQ4AkQAAAAAAAAxjgARAAAAAABAjCNABAAAAAAAEOMIEAEAAAAAAMS4y/sNSPmFfaKTufOLdfU1I1W5q8Jv+qzZc/SNmwtUtm2r3/Royy+Yqpn3fE+VO3eopaXFPjvqZs2eo6xrx3Y4Xklyu936UfHP5OrZQzUH9ttnRyQnN0+zvvc/tbtyZ5ePK5rpuhikpqXrJ488prjLLrvgxxOo/tulpqXrR0WP6NQpj44drbPPvmjZ89rari+mY8rJzdMPH/6JTp44fsHT0hXnuz+7kKLdL52vc05XhSrjcNvV3PnFGjfxhg7HG26fdCHYy9xeZnPnFyuxT++o1AcAAABEz0X7BNHc+cWaNXuOffIlz35c/ZKSdKyuVh6Px2+5C2HW7DmaO7/YPvmCqas9osd+ukClmzbaZ31pXagy6K68ttf3rqooL9NjC3+sivIy+yxEKNpl0x1S09K16IknlZObZ58VM9a98pL69e/vlwc5uXmKd/XQH//wqt+yAAAAQFdctAGiWDHimlHaU11pnwwAgOpqj2j/vr264cZ8yfd0TsG06dr5YdlFcWMBAAAAXx5xw0dlt9snOpk7v1hNHzdq7fNr/KbPmj1HSQMGavWKpZJv8Fr40AINTE6RJNUcPKDVK5Yqv2Cq8sZPUsnTy81BbX7BVE2+aYpKVj2lutojjuu3tbbp5RefU7+kJOWNn6T6o0eVdW2O37YN+QVTNW36DL/1KsrLlJqWru8XztWRv/9dWdfmmOulpqWr8MGH1SshUZJU/sE28/iMO+v245UljWXb33N84sIpHYf273M8LvuTEHPnF2vo8KslSY0N9X75ZZWTm6eZ99yneFe85Ev7H//wqgofWqB9e6o1NjdPvRISO+zHmjZjvbXPr/HbryRtWL/O8djCFeg4AuW5kaf1R49q1JhMnTx5XC+s+a3unfMDv3yeNXuOcsdNMPdjLzOnumisH2jfTqz7aWyoV2trm47V1ZrLO5Wxta5tffdtTb5pSlTKIFi67fUgnHKzHtsZb7NKVj0l7+lTfnllzUvjmF5/7b90w435HfoB44kna1sM1I4ryss6HL+1HYfKP2taAuWnkQeB6mAw9m05pc3ejxhycvN02x3f0e9KVquu9ohZbm+/tVmlmzb61UdJAfsz+3aMbc+YebfWvfxShz5DtvZmL1OjDAJNN/K3of5YwHpmNWv2HA1KTZPLFa++ffvrzY2va9KNkzuUidHfS+rQXwZqP1b2um2U4a2336mkAQMlyTxme1qd6riRlwb7Ocmex/a6FmndqK7a5bd9e5vYt6daKYMG++VbINa0GOdCY7vB0uVUl5zadqA6bbCXhZHf9v7YaHv2vtfeN8+dX6zqql0h+yoAAACcXxE9QZQ7boKWrSzx+2cdHBqDwsM1h1Q0r1CLFxUrITFR+QVTtbe6Si6XS8NGjDSXz8jM1u6qSr+Bu8fj0dInHlXNwQMq/2CbHlnwoDl4HpicopaWT1U0r1AvPPuMrkobovyCqZIl2LRiyWIVzSvU5jdeV8G06XK73ZKk+Ph4JfburaJ5hX7Bobff2qyieYVasWSxRowc1eVXGQKlQ1LA47Kum5CYqMWLilU0r1Dvvv0nv/kG40Lm5RefM9N+6uRJc/7Y3DyVrHpKRfMK9VHtYb98SB8y1EzbhvXrNCYzS6lp6Vq9YqnKP9immoMHVDSvsEsD90DHEU6epwwerCWP/0xLn3hUzadPWbZ67sJmTGaWmf4VSx
[... base64-encoded notebook output (embedded image data) elided ...]
ie7du2Pnju3Iys7BXbPuwztr/wdv/K4IOz4o1n2Oe87ceRg0ZKjcf/qNWUhy90f1JxXIHJOD5qbT8pnSH/zzv+DUp7XYvPEPsoE8/8zPsH/3ToybMBlfB/OgTa/es8ITJ9+Ca6+Nw84d29VFAIBhN4xA4nX9ULZ3F4YOG46sseNkmTQ3eTBpyq34Cl9ZPluZcdMYpGVkovGzeqx44Tns370Tk26ZhtTBQ1B5qAJDhw3HwJRUlO/bg8R+SXLy6zerXkLVJwdx+513yec9x02cjNqTJ+SySTdPRXvAj9bPm/FP338UOz/8s1x2Tbdr4GlsCCvb/gMGYmzOBN0y0aNNn3gOtldvF8ZPmoLerj54eekSlG7fhsyx4zDshpEo27tLNkCjfGj16u3CmHHjUXPkME7rpNfRozuycyag8bN6+VzutNumY9WKpdjw9lp8ha8wNfcOVB48gB49eiAjcwx+s+q/sGnjH5B4XT+kZWSGPcPb3t6OXaUlGDEqDcf/WoOXl/0SpxsbLOt5dEYmvvzyS6z8Py9g/+6dyBw7Dn0S+uKDrf8Ph6s/QVrGjfjjurX447q1us8Ma+N123ub0fPbTpw+3YCUQYN191u+f69uOkUbEPHk6N4d9ac+VQ8nY2/3Rx/iN6tewhdffoHJN09DS0uzaTwdra7CuAmT0NzUhNONDXC5XLjnu/+Esn270fhZvRxcauPZ7jsSTjc26JaVUVpqjx8LicHH/vUncMbHY9XyF/B5c5NpPDi6dzeNU1Vu3gxMv/NurHtjDd74XVFIWy3fvxepg4dg8JDr5baTb83FwJRUbN7wDibfmmuZjkAggOef+VnYsY3KxKz+/nbBF1E9ZNw0Bj17OlG2d5dp+920Yb1uWtR2qe1HxP60+bNqS0b1Vr5vD7YXbw2LeTu0fcnfLvgM+0StHj16YNyEyRg0eAiKVr2EDW+vxaj0DIxOv1H2KflPPAVfW5tst1/hK9x1972oOXIYn3x8AM1NHgweMhS/fnk5the/Z1q+ory0fV4k9bztvc24/c6ZSEzsh6pPDuqWjbq/9BuzcNes++Q56Isvv0DOhEmoOXJYvjNwdHqGrIuqTw5ixt33yHcHiLr8KFiWtcePmcaDVjSxbdY31R4/ZrmtamruHfj967/DhrfXhuVdq2zvLiRe1w/+9nY8/8zPUHv8mGFsi/5o547tYdtEcv7TEoNfo1j7Gl+HnM+ysnNw05hsvPuHt9Grt8v0mBk3jcH1w27Anza8g7WvvYJ2f3tIDEI514/OyMTEKbfg1y8vx4a316L1zBlc8PlwTbdupv2OGhfq+Vur9vixsLYj2mN8r154eekSbC/eatkGA/52jJswGSNGpeNPG97BG78rQnOTB3fNuk++/0RNl1G8qvonD8SIUWn4uLxMt61ruVwuW30OIohJUR4Nn9Wh9vgxw3ZxpLoKu0pLsO29zdj23mYZk2+9/ruw89bojEyMTr8R7/7h7bDjaWnPGdBJi1U7NDvn1J36FLdNvwtjxuWgaNVL+PPWzXLcU3moAi6Xy3Asp46T/7JvF+78zr3o0aMHyvfvlfEg0ml1Lop0XCfytXPH9rCy1Y4JZj/4kO2YMxt7VB6qkOU3ZOgweZ7StoFhN4yMqr2q/YKjR3fDPuaabt0M60SvH1TPx2Zt+Py5c5bxpGXWVxudd41iyu6YwyqOjNqm3XOo2ldq++NrHY6QeklOScXkW27Fn9/fEpZWdWx1trUVN0+9DefPeVG2d1dInybav7atGpWTGMuqY5eePZ2648ZrunULaYdqurRj5Mm35mLI9cNkWXz19Vdo9pwOu44yaquVhypsXxeZ1VNXZfsRs8Tr+qH2xHE585aVnYOk/v1RvGWTnGE7Ul0Jh8MBZ6/eSHL3BwA0eU7L7Vs+b9bs0VxWdg6GjxiJN9f8Vt5Bk+R2w+NphNfrRcDfjoTERLluQt++KC35ALl5M+CMj8ebrxYBAIYOHwFnfHyn3d7W3OTBe3/aAASfrf2s/hTc7gHqarq05en1elG2bzcGDRkKl8sVst7ItHT4fD4c2LcHCD7udqzmKEalZQAASoq3ypnYhvo6NHk8IWkQ/2+or8PBA3+BK/g+qLJ9u2XdlZZ8AGd8PJI7+Ax9IBCQMSHy5IyPh8vlssyHEb30lhRvRe2J43KdtPRMHK6qlDPfB/btgd8fwNDhI+D1evH6K7/WjVO7zOq5orwM765fBwTr8VTtSfmNhxURuxvXvyU/27JpIxrq66Lar1ju9XqxZ+eH6mKp9sRxGTMH9u3B2bNnZHsyiqeG+jq0njkj62vo8BFwOBw4Wl0l/19a8gGgiee09Ex5zGgYpUXrvgcegjM+PmS23yweYBGnqrT0TByqKJffaKht9Uh1JRL69pVtJy09E6dqT8Lr9dpKx67SEs3R7DGqv47WQyTlotcu1X5EL39mbcmqvGJF7RON7Ni+TaZlV2mJ7DdEOWvb7YF9e+Dz+eS79/REUr6IoJ4BYOP6t3DhQptm63Da/VVXHcIFX5vcx9HqKgQCASS5+8u61ea/ob4Oh6sqQ2Kpuckj+3M78WCHUZ5hoz8w21b1P6/8t8ybNu926cW2mWjPf1axdrS6Cg6HQ7aRUWkZaD1zBg31dbaO+Vn9qbBvxc3EORyynA4e+Asa6uvC4lHb7+jFhXr+tku7D6tyEba9v1nmr6K8DE2nT2NUWoZuuqKJV7vs9DnRxqRVu4BmTC3Kq7Tkg5BXJmjjpqMiaYfaPvHksRqcPXtGngO8yrjHazKWU2Pd6/WieMsmBAKBkONpmZ2Lohl/CUZjAgARxZzV2APB8tu4fl3IOVPbBiJtr4K2XzDrY8zqxIrdNhxJPOkxO+92JP2CWRxZtU2rc6hZXym2GzthEhA8x/j9fpw8VqOsGT62qigvw6GKcnU1Q7EoJz1qug4oYz6n0ymPsWfnh/L4WlZt1c51kVU9dUW2JohcLhfiHN1DBkMJiYnwtbWFBGJCYiL8fj98588hITFRdiBi+yPVlUCwMnf8uRgPP/qYvHtClZCYCFefBBQsWoxlK4vweMEibFy/TlZgy+fNcLsHwBV8AdaunTvQUF8Ht3sA+iW5sXjJUixbWYTZD8xB0coVqCgvw7o31sDX1obFS5YiN2+GekgAgMfTaDpgj3SiKxKtLS3w+8NPZmqelq0sQvb4iTLoXS4XCn/+rFwmbsfzer0o+tVyDBoyFMtWFsmydvbqDafTiZmzZsttChYtRp8+fW0NRqJllQ8jVs+wulwuOOPjkT1+otzv4iVLMTAlRZ5EcvNmyGUPP/oY4uIu3vIYK/MLCkPyZJe2zeiJZL/iNtBlK4tQ+PNnDWPYilE8IXiR3D85BS6XC6PSMnCs5iga6uvC2uuylUWYOWu2aVuywywtANAvyY3rhw+XE8liG6t4sEvsS325fmtLizw5VpSXofXMGYxMS0dySiqc8fEoLfkgpumwq7PqQU+s+5FLUV5GfWKk9Nqt1+uFr63tkgwq9I4fqdaWFrS2ntXdh1Gfa3Z+jHU86LHqDyKRlZ2DXy5fJdPZs+fFW8s
7S7TnP7261sZag2bSx+VyoX9yihyrRXtMIxXlZdj4zlt48KFHsGxlkRxHmfU7/Qck68ZSR1mVixExfrsU8YoI+5xoY9KqXSSnpOLu+76Lze/+UV6INWi+8FHj5nIR9aeeb7WMxnJuzTtPYyWS8ZeW0ZggkpizM/bQI8oQUbZXvb7drI+BSZ1YibYNx1q06bfDqm1qGZ13jYjJEDGxl5aeGTK5JBjFUqRiXU5WY76S4q3YsX0bChYtxi+Xr5KT2XqM2qrd66JI6qmruFb9QM/Q4SMQH++UdwPBoDNOS8+Un6WlZ8oTsd72JcVbcbS6CvmPP4ncvBly4kdwuweEPMep8ngakZaeKWdOxUxq4nX9DJ8xRDBYxC1tR6urwr4tOVpdhSm3TMPQ4SPCvlVLTklFQt++nXYSTUhMhCP4rKaWx9NoWBYu5dlmaB5ZQrADWfrcM/Lz+QWFePPVIrS2nsXG9evC8tiZzPJhxnf+HPx+f8hFouhYWj5vlicUbRlo5Qaf2V2yuBDe4HuZ7r7vu+pqUZsffBZb+5yu3QG49mSvdurR7Fe7bv4TT0X8DK1VPJ08VoNpd8xAWmYW+ienYMcHF9dpbWnBZ/X18h1jsWCVFgS/ffE0NiL/8SflM8dW8aD3TZ0Rs8FKW5tPDmyqqw7Jk7SvrU32K7FKh12dUQ9GfOfPmfYjkebPqt5iRa9PjLRPMmu3HR2E2SW+WRPHT3L3R5zDoa4WFb0+Vy4zuAizioeOstMf2CXOAatWvIgGzfufOlO05z87sXakulL2ywj202J5NMc0U1FehorgO9TyH38SsOh3XC5XWCxpz9/RslMuesSXfJ0dr1p2+pxoY9JOu5j9wPdwrOZoWD7Fe7PUuLlSWY3lxASHiIeExMSoL2KjGX9p6Y0JXC6X7ZizM/bQmyQSbUuItL0aMepjrOrETLRtOJY6kn4rdtqmlt5516qvPFJdibvv+y5uGjsOzvh4HK2uUlcxjKVI4rkzykmky2zMVxK8sycrOwezH/weEIxpLau2anVdFGk9dRW27iDSm+X1eBqR5HbLi4DcvBkYmDIIu0pLZECLBq63PYKNwefzhXwmHKmuRFL//oYzhq0tLXDGxyNnwuSQx9yqqw5hdHqG6cVJa0uL4W2nDfV12LVzBx586JGQY4vOVe8kq0fMRhrdqQQAA1MGyWMkB389TW/292h1FZLcbt19qTPOycFH8cT/f/DYv8h1RQfj9XpxuqEeeTNnwaUzmyoGMLFmlg8zohPJmTBZpnfshEkhL16srjqEabdN1613dTJzVFpG1IMGlRrrruAtonadPFYDv9+P2Q9c7PgAYOas2RgxanRE+3W5XHjksR/L8on25GoWT9DEzoTJUwHNIOHksRo4HHG465775bodZZUW4d31v8fhqkrkP/6krH+zeIhUddUhZGZly7bqCt61eLqhXsbU0eoq2R9VVx0K2TZW6bCjM+rBiFU/Eo3OLi+jPjFSeu02N28Gktxu3QFarIljTM29Q342ZWou4q6NzQSRN/jNpLYuxDlKG99anREPWnb7AzvUMcnItHTbd2tEy+z8l2XyM+l2Yk30wxMmTw3rl4yOqUdcoIhH0ESdC7l5M+S+tOM3s37Hzvk7GnbKBUDIccXy0pIPLOM1N28G/uOF5bp1Egm7fU60MWnVLubMnQdnfLx8BEZLPM4x4+57cfDjMhk3801+2lsd+991z/0drku7zMZyR6or0adPX/mlscvlQs6EySHb29XRcR0MxgRWMaeyM/aIi4vDlKm5chvRBg/s2xNVezVi1MeY1YkVu224M3Uk/Vas2qYqmr5S3K12z+wHcKr2ZNhND0LL580h18ZZ2TkYmDJILm/ynEZcXJx8tC8rOweZWdlyeWeVk9mY774H5sjYb/KcRsDvV1cxbat2r4siraeuwtYdRG73APlODaGkeCvc7gEoWLQYCL41XHzzgeCMuXgreWtLC/r06Yv8J57C/13/Fh569DH5pnDts6VaFeVlGJWWgYcffQwPP/oYoPxaQZPnNJxOJw5XVYZM2JQUb0VaeqZMl/aXqLS/BCDe86KnpHgrWlta8OBDj8hjw+SXJ/SImX2zTq6pyYO8mbPkMcr379Xdf0N9HTauX4cHH3pE903vx2qOynK64GuTJwHf+XPoPyAZy1ZefB9Ts+aXLda9sQbzCwqxeMk/vsWK9TeNKqt8mFm9YmlIemtPHA95LleNR2jyWxr8RQxRDp/V1xtOECL4GNWDDz2CzKxsvPP719XFIbzB55lnzpqNmbNmI+APoKnJI5eLW3MffvQx5M2cFTZz7Q3efp7/xFMh9bSrdLvpfqGTzvhevWT5iLf4a49lhza9ajwJR6orkZmVjW3vb5b793q9eHPNb0PKGRG2Gb2yskqLIOK5YNFieUyjeIiUSL+2L1LvUmyor4OvrQ3O+Hh5N6PYtiPp0CsTM7GoByN6aYl1P2JWXl6vNyzmrfoNlVmfGAm9dqv+MkpFeRmmTM1FwaLFqA3+ilmsaPtScTv1rtISJPTtq64aNfHrNNq6sIqjSOJBL57M2Omb7Dqwbw9yJkyW6Wxu8pi+v0l7DhG/jGNF3aakeKvh+S9B80i+yk6seYMXnqPTM/DO2tfktpGec73B97aIuLrga8PH5WUYMSoNCI7ltPvSjlnM+h2r87dKbTvinZJaVuUiLwoaG7HomecR54iTeRflFkm8mlHTq93ebp8TaUwKZu1CXOTFOeJC8ijOXyJuEhL6yJh2Ka+EUKlj7MqDFWhWxiadxWwsV1FeJsf5YswkfsUsUtGM61RGY4JIYs7O2CMQCKDt/PmwNuD1eqNur3qM+hizOlGXq32nVRuOhtnx9FilvyNjDrO2aSTSvhLBSZbpd95t2Gahcz5vbvLg6OEqxPfqBQTTumP7Nhnz6nKrcoqW+ZjvbEjsb9m0Maz8zdqq1+u1dV0UTT11BbZ/5v5Kkxt8TKwjHUlnys2bAbd7gOGjbuIWOL2TAhERUSTELd/an6mlq8OP5v8EZXt3hw1+KXounccGKJw6Fs3KzkHOxMn47eqX1VWvOrmanwRXLwqJvimijXO17RNp2XrE7EojbnvW/sLKlaakeKvh5BAREVGsiEcPjO5CoSvbb1e/zMkhuuSysnMwOuPGkPdqVpSXfSMmh8R1gvr0A9E3iYhzvdeTEHXEVXUHUXLwPUBxcd0jvtXvSsOZWyIiioa4O0L7bgKjxxSIuiLeQWRM23+YPVZ0NRHXB+L1FdB5HIvom2R+QSGGXD8s6jjndSiZuaomiIiIiIiIiIiIKPauykfMiIiIiIiIiIgodjhBRERERERERETUxXGCiIiIiIiIiIioi+MEERERERERERFRF8cJIiIiIiIiIiKiLu5bCde5/7f6Yawkp6TiJwufxrlzXpxubFAXX1Zz5s7DzHvvR+XBA2hvb1cXUwzNLyjEgw//AHkzZyFv5iyMGJWGsr271NViZs7cebj19rxOPQZFJis7B4/96xM49tejOH/unLrYkFU7jXa/kcjNm4H5Ty6U8fvFl1+g9vixkHVcLh
d+UvjvSB08BJWHKuTnWdk5+PGTP8XZ1jMhfeD8gkIMu2FEyLqdJTdvBh586AeGZdjVaWPomm7d8JPCf4ejR/ewOu4MZnVjFfudITdvBv6/f30SLS3NUZ2z9eJd9P92+n3RjmJV/skpqfjxT36KkyeOdVr/0JVFUrexkpWdgyd++jRuv3Nm1HHaEZfinKPVmf1AZ46VzPq2yyGa6xERa1/hqw73R9EcX4/L5cKPnyzE3//+N3x3zvcR37sXao8f63B5a7dP7JeEnz79C3S75hrDfMdiDJObNwPfued+BAJ+zPnBP+Nw5UHdtIvzwn0PzEHidf06dMxLxc64sTNoy0r0yyItuXfMsOwz1es18e/mW3NRc+QwvnPvd2PWZ2hj2SxNVmLVtijUFXsH0Zy58zC/oFD9OGrzCwoxZ+48+fe6N9Zg6XPPwOv1hqxHxrKyc7D4uReQnJKqLtKVnJKK/3hhOQBg4YJ8+e/8+XO29qHWGXU9V0I7dbn6YMniQixckI8tmzZi2m3Tw+LX6/XiVO1JJF7XL+TzUWkZ6NnTiVFpGfIzl8sFZ3w8PJ7GkHWJtC517LtcLtw0JgdNTZ6QeI1ERXkZfrHof6GivAwIDpKd8fFYsrgQq1csVVeP+JxiJdbjhm+qWJS7Vd12BpfLhbyZs7Dt/c14+qnHZZx9k0XSD8SiXukfpkzNxWf1p5CWnqku+kZrqK/DL/7tKZQUb1UXXRZ33XM/fG1tWLggH+veWKMuvqTsXpfYGTd2Bm1ZrV6xFMkpqZhyyzSsfe0VW33m6hVLsXBBPpYsLkRzkwdbNm3EwgX5+MW/PYWG+jp19YjEqn/6ppznr/R8XLETRHR1c7lc+P68H+FwVWXY4PF/XvnvDnc0RJfKu+vXycH50eoqBAIBJLn7q6vB42lEktstT34ulwuDhgzFubNnQyaOhg4fAYfDgaPVVZqtiS6vocNHAAD27S7F8BEjOzyIAwC3ewB8bW22Lm7p6nI56tbZqzccDgdaW1rURUQxlZySCmd8PD76cDsS+vZFVnaOugpdIonX9UPL583qx1c0u+PGWBJjzuqqQ/KzkWnp8Pv9OHmsJmRdIivdrh+Z+bX6oZ45c+che/xEAMAFXxuKVr0EAPhh/nxsfvePId8Y5kyYjKJfLYezV2/8MH8+du3cgWm3TUdPZzwC/gDe+f3rcn29/c5+4HsYcv0weewtmzaipHhryLraz10uF/KfeAo1R6oxJjsn5Dgnj9Ug/4mn0C/JDQDy84TERJnO7/8wHy2fN4fMTM8vKJSfJaekIv/xJ9HTGQ8AKN+/13AW2yqNnsZGjBydjrNnz8gyEvsO+APYv/cjZGRm4dWi1YBF+QIIy3ftieN489UimWe1vEU6RHnUnjguJ3DmzJ0nL2RF+Yu85ubNwMxZsyFot9OTlZ2Du+/7Ll4tWm04GaSmRdS/7/w53Tpr8py2VQ9m+UDwxK/dT3OTB0W/Wo6xEybJshUdu15565Wdll48WtULDNpCQ32dTIOnsREZN2WFxLCoEzUt2vpSjzW/oFCWi8i73iDfKJaTU1Lxw/z5qDxUgfETb0acIy4kvdAp48qDFUgdPDgsHkRZle3bLb+t0n4GIKROrParxpRZuegttyobMduvV++iXER7TU5Jxffn/Qhl+3Zjyi3TZBpFfGrbnbacRayqbUjkfcf2bSF9irbs9KjxA518d1YatOXd3OSBp7ER8b16yW+2fpg/H3WffoqMm7JkmszqSE0PNO1du89oY1P0PZUVHyMja4yMI6t+HCZ9g3o8bVxp+xev14v5BYVIcrtRtOoljExLDzuX2s2Xei4x6oO1xLei7/1pg26dWrUNKG1gytRc3fO4oFfPoo/UO5drz4Ha7USsatMHnePBpI9VGeVVrUvtPqLp9636Ky2jtKv7UI8xZ+489E9OgcMRhz59+qLqk4qQi12jY5rl1W5Ziws7kW51vKFNl0izWV/04EOPIM4RB2jqRTuGgpJOtU+GwTndLN7UclDPOapo6wlK3Ilttf2A1+sN249Vv6l+rh0LqeciLe0YGDrnN6v6tXPeMRunqHEgYkxNFzRpFX2XOqY0igUzuXkzkJaeidUrloYdU6+tq+lX66nmSDXc/QcYHt+snuywU95qmWqPodcutOcA7bbNTR74/QGcbqjHujfWhOVVHcvrje9O1Z60zJ+6X20Z28mL3x/AwJQU3f4JBu1Nvf4w+jyS6xKzcSOUdGjrzOg8pPf50OEjwvrHv//t70gdPFgeZ8umjUhLz9Q9lspobCfaPaK4vtLrn7TUPl6v3vTOPUerqyIaH8GgnsR66r7K9+9FackHcnt132ZxCoNzgtE8x5XE1juIsrJzMHHKLfj1y8ux4e21aD1zBhd8PgDAmHHjUXPksHzub+iw4RiYkoryfXvg6N4d4ydNwZChw1C06iVseHstvsJXuOvue1Fz5DCG3TBSd7/bi99D4nX94G9vx/PP/Ay1x4/B5XIhI3MMfrPqv7Bp4x+QeF0/pGVkovLgAQDAuAmTMWjwEHmcUekZGJ1+I8r37cH24q0YMSoNx/9ag5eX/RKnGxtC0nmtwyH31d7ejqzsHNw0Jhvv/uFt9OrtkhdEv1n1Eqo+OYjb77xL91lHO2mM79ULLy9dgu3FW5HYLylk33/Ztwt3fude9OjRA+X79wIW5Sv2KfK9d1cpbr3tDky9LQ9/2vAO3vhdEUalZ2DQ4KEo27srpGNe8cJz2L97JybdMk2+7yHjpjFIy8jE7o8+xG9WvYQvvvwCk2+ehpaWZuz9qBTNTR4MHjIUv355ObYXv6fJebjRN2ai/4Bk7Nm5Q/eZYgAYnZGJL7/8Eiv/zwvYv3snMseOQ5+Evijfvxe7SkvC6uwH//wvOPVprUy7o3t31J/6VN2taT5ONzZg3MTJqD15QtbnpJunoj3gx9HqKoybMAnNTU043dgAl8uFe777TyjbtxuNn9Wblp1Wjx49IqoXBDuQQUOGYsV/PotNG/+A/gMGYmzOBJTt3YWhw4Yja+w41J26mPd+bjfu/M49iItz4PlnfobmJg/GTZgk3/uRmzcD026bjlUrlso2NzX3DlQePIDJt168aBPH+errr9DsOR1WR2axLNp1b1cfvLx0CUq3b0Pm2HEYdsNIlO3dFTKJ8JtVL2H/7p249Y48Gdfa9za0t7cjdfAQDB5yvSyLybfmYmBKKjZveAcDUlJlvKvtRd3vNd26mdZRbt4MTL/zbqx7Yw3e+F2RXC7eG5SbN8O0bKxO8ufPncONWWPhdMaj8lAFxk2cjD59EvD+5neRnjkWzU2ncbqxAbfenoeWz5tReagCc+bOw+j0DFlXVZ8cxIy770G3a67B4U8OYsy4CWj3t6P2+DGMmzgZaRk3IS7OgbK9u5DYLwmZY7JRsm2r6bsw1PhpbvJg0pRb5XsVOisNahz6fG249bbpOH/Oi7K9u9CrtwvjJ01BIBDA88/8TD4rb1ZH/ZMHYsSoNHxcXiaPl3HTGPTs6QzZZ7SxGfC3Y9yEyRgxKl221eYmD+6adR/Otp7B2TMtY
f14jx49TOPOqL+pPX4spD9/7F9/Amd8PFYtfwGfNzfpnkvt5ks9l6h1o3K5XLgldzr+sm83Pj15IqxNWrUNoVdvlzxnbdqwPuw8rlV7/FjYOUXtO7XncnF+npp7B37/+u+w4e21+OLLL5AzYRJqjhzWHTdoGY1j1LIxyqvofw5XVWLFC89h23ub0X/AQNySe7vuOMSq37c6H2sZpV30eb62Njz/zM+w7b3NIWOs8+fOIeOmMbh+2A3404Z3sPa1V/BJxYGwcleJeDLK684d203LGprzcONn9SH5E21ZTdfpxgbTvmjvR6U4XP0J0jJuxB/XrcUf1621rBNH9+4RjaHUeLM656ix05F6EucXsbznt504fboB1/VLkuk162tKirfq1qtRexGxIfpO1fhJU/C3Cxfk+160bft0Y4Nl/Vqdd9Tzg3ac0qNHD8Pxh9l4PWXQYN0xZeWhirD0W/nOPfejuuoQao8fQ3zvXiHHtOqn1Hra9t5mTLr5ViQm9kPVJwd1j29WT3ZYlbdZ21LPRQi2i4bP6uS22vGpo0d3ZOdMQONn9ag8VGE6llfPJaMzMjE6/Ua8+4e3LfPW3t6uey1gJy9ZY8fhow//jN+sekm3fzJqb0YxFO11idW4cc7ceeiX5JZlK84NPyn8dwT8fjz/zM+wf/dOpA4aivq6T/Hjn/xUfr7tvc3Iys5B5phx2LRhPVpampF4XRJ+FRyb7Nu9EyNGpclroYmTbwk7lhER4yIGhGivr4z6J8HlcuGfvv8odgbrrOqTg7im2zXwKG2lbO+usHNPpOM+s+t4dV91pz7FbdPvwphxOSha9RL+vHVzSL8izuNGfXsk8xxXGtuPmMU5HPL2uIMH/qL7zYmeQCCAjevXyfUP7NsDn8+HkWnpQAT79Xq9eP2VX8tvLI9UV8LhcMDZq7dcZ8f2bXL7XaUlYcuNHK2ugsPhkLfYj0rLQOuZM2ior8PItHT4fD4cCHaaDfV1OFZzVPcdDXbSWLZvt1yu7tvr9aJ4yyYEAgG5vh0i3w31dWjyePBZ/Sk5c1lddQjO+Hi4XC75aEtpyQdA8Hhl+3aHPF9de+K4nMU8sG8Pzp49g4TERLk8lirKy/Du+nWAyTtcVGK51+vFnp0fqosls3yUFG+Vy0SZud0D0FBfh9YzZ2Tdah8FslN2Krv14greFqqNjdKSD+CMj0dy8DGP5iYP3vvTBiAYVxcu+LCrtAQAcPJYDXxtbTJ/aemZOFxVGdLm/P6AjG+n0yljcs/OD8PuAoCNWA4EAijesgler1eWhchPpHF9pLoSCX37yrympWfiVO3JsHRZ7deqjtLSM3GoolzWgVg+aMhQuFwuwKRscoPv23jz1SKYqa46JPeXlp6Jls+b4fV6EfC3Y1RaBpJTUpHQty+OVFfKetf2Ww31dThcVYm09EzZJtzuAUDwkY7KgxWIc3T/Rzm3tRn2mVra+KkoL8Nn9afgdg/o1DSocVhRXoZDFeUh6wQCARnHsFlHVmIRm9ve3yzTUFFehqbTp0P6fG1btYo7o/5G674HHoIzPl73rhwhFvkyMnbCJDgccfI2dLVNwqRtdAazc7n2EeVIb923O97Qy6soY9GOEOyntWMHRNDvW8WNSi/tYh8b178l11PHWABC0mCH3bxaqT1xXH5Lq9eWtemy6ov0xCqdRvEWTbuKpp6ysnOQ0LdvyPItmzaGxWekMYMOthcrVvVrdN6BzvlBO07xmow/zMbr0Ywp9Yj6EI+Bq8cUjOJGrScA2Lj+LVy40KbZOlQs6smovKNpW4Le+LSkeCtqTxyX65iVe2nJByGP6GnrKxp289Lc5JHtVmXW3szyYsTousTuuFE1dsIkAJDbifYwIi0dTqczJN3ac8qotAwE/O2ynsR4M9aP5EZzfWWXWLehvg4HD/xFXWwokvFRg8l1PJR9nTxWg7Nnz8i+So0Jq74dBueEq4GtCaKK8jJsfOctPPjQI1i2sgi5eTPUVWzzer3wtV3sJCPdb27eDCxbWYRlK4vw8KOPIS7u4m1oHaUNFpfLhf7JKfKixe0egH5JbixeslQeO3v8RMMOI5I0dsYz/C2fNxs+q5uQmAhXnwQULFos0zhz1mzZiGKptaUlZFBvZH5BYUi5mhEz8MtWFqHw589GnWaXy4XCnz8rj6u9zW9XaQn6J6fIzvZYzVE01Nd1uOzM6sXZqzecTidmzpot912waDH69OmrO0Bo8pyGr60NTZ7T6iK4gi9Azh4/Ue5r8ZKlGJiSgoTERJQUb8WO7dtQsGgxfrl8lTxp64kklrUijeuK8jK0njmDkWnpSA4+968dWAlW+zWro4HB/aovhtbGqVXZmB1bEPtz9x+IOEd3HKmuBDQTR4OHXo+2Nh9OHquB0+B9Gh5Po4wrj6cRg4YMRXJKKhKv6ycHH0OHj0BaembIs+bR6Kw0iDhUy9uM0TZ2+xI7rGLIiFHbhUXcuYITwEb9DQD0S3Lj+uHD8eaa30acLiHafAlp6Zkh57mHH30Mrj4JmJp7BxAc9Jm1jUspKzsHv1y+SvaTPXtevGXcit3xhlFe9crYd/4c/H6/4ZcoZv2+VdxoGaU9ITERfr8fvvP/+CZejLEiGZSrosmrHa0tLfD79SdX7PRFqs5Kp6C3fzPR1pPecj2RxIwQbXuJhln9almNU2Ay/jAbryPCMaWRUWkZIeVcsGgxXH0SMGVqrrqqLrv1qdWZ9RRN2xKMtlUZlXuD5otXvfqKlFF67ORFsKofo7zosbouseo/1r2xBr62NixecvFxUAT7Hb0vSd3uAWjyeEImGLR9VKLyriZx/SCuFfSOFWtW4x0jXq8XRb9ajkFDhmLZyiJ551UsRHodryX6aXVcKujFkrZvNzonXA1sTRAheOJ7+qnHseLFJZh22/SoMylODILd/eYGnykVb4Vf+9orpt/iROpIdSX6J6cgLfPis7vim1SPpxG1J46H/AqXeDu8Kpo0qh1aQmKi7QvxSLW2tOCz+nqZPvHP7i9kROLksRr4/f6QbzFV4rlukQ7xWJ2Z1cE37J+qPYn8J54K64ytuDTP1Yrjar8JEfWelpmF/skp8gK/M8vOd/4cWlvPYu1rr4Ts284vDqhEx7Ql+MsD2n9iVr+keCsWLsjHO79/HbMf/J7uxV40sSzonait4rq66hDS0jNN70ix2q9ZHX1WX2d40dTW5pOdu1HZlBRv1W3zKhH3k265Vf4NzSRHRmaW/IbH7EJGnPDFN5fjJ02B39+OmqPVON1Qj5vGjEOco3vYIClSnZ0GtbzNTsjak6pKW0cdYRVDRszSbRZ30LzLQXyu7W8Q/KbzxLFjyH/8yZA7diIVTb6g+ZZR7X+2bNoYcjeAUdu4lLKC76BateJFLFyQjxUvLjH9Vl5ld7yhl1e92AEAvz9g2gaMmMWN3jlFL+1mk6dGA1o7Yp1XISExEY7g+yVUdvoiVWelU9Dbv1W7iqaezJZrRRozHW0vkTKrXy2rcYrV+MNovB7NmFLlCt6hoqZt7WuvhN1VaUSvPpPc/RHncISsJ3R2PUXTtgS9bdXr
OKtyF1+QqfUVDb30yGUWeRH06kewyoseo+sSu+PG1SuWYovml86MxhvqBJCI1ZbPm0PuThf07tZSjxVLVtdXVrxeL5Y+9wwWLsgHNI/ndVQk1/GRMoslcQ7WOydcDWxNEOXmzZAZ8p0/B1/w/UOioYrbtJJTUjHttukh28bFxYXMut91z/1A8BYso/3qUb/JGZWWYXqSjpTosCZMnorTDfXyOEerq5Dkdtuq0EjTeKS6En369JW3E7pcLuRMmCyX2ynfSJw8VgOHI07WQUdlZefgP178L90LBW/wFr+Zs2ZjjvKTkD947F8wYtRoODV3C4iOzojL5cIjj/1YdrzRDn7Vbx+SU1KR5L74YjEE0326oR4TJk8FNHER67LTEsfMmzkrbKAbjeqqQ4ad/30PzJH11eQ5jYDfr64CRBHLWq0tLaZxredodRWc8fHImTBZ944U2NivVR1VVx1CZla2zL8r+JPJor2blU1u3gz8xwvLdctUyxu8/fTiy8T/cavvyWM1aGvzIWXQYJk/sa62rkQbF+uIb99uzByDY389CgT7jUGDh8j9IngiVduZHbFMg5bY7+j0i4/VIdhfDEwZpK4awqqOmjynERcXJyees7JzkJmVrezFmFUMCTkTJsu2mJs3A0lut+5dbbCIO6v+Rnh3/e9xuKoy6kkiq3OJWV890uBXTrSPVZi1jUtJ/bZuZFq67W/a7Y43jPJ6tLoKTqczpJ7vuuf+kEfzImEWNyqjtIsJ6dkPfC9k3SS3Wz4eE41Y5XVgyiBZlqJf0T6qomWnL1JZpbOjYyi7/YUQbT3pLZ85a3ZYXxBJzKCD7aXl8+aQCeLZD3wvbNtI6ldlNk6xGn+IGNSO113KHahmY0qz/lA8MqK2H1FHZl96CuI8Je7ABIApU3MRd63+BFFH6smOaNqW4A1O5mnPiWMnTJIv5LVT7uLxwRl334uDH5fZig8jHcmLYNTeYn1dYnfciGBfIyZBtW1Pewy1Td51z/1wOp0oLfkg7G4h6EwoCdpjxZLd8Y6e5JRU/OCxf5F/66U7WpFcx0dKL5a0fbvROeFqcK36gZ7WlhY8+NAj8g3k5fv3yrsRirdswoMPPYLs8RNxwdeGj8vLMGJUmtw2EAig7fx5LFt58VlK8QZvr9drul/xxvBlK4uwZdPGkL8B4LP6+ogCfFdpCR586BFkZmXjnd+/ri6GN3iRPjo9A++sfU1+3lBfh43r14WkU31DuRBpGivKy5AQ/CWqmbNmh/zyDIJpsirfSHi9Xry55rchaUQEb0+vKC/DlKm5KFi0GLUnjlveJlpSvBVHq6vCjle+fy9qjhyWE0gi701NnpDt1TqL79ULi5dcnPHVxlEkxO3JDz/6GB5+9DFc8LWFNdgj1ZXIzMrGtvc3y/13tOysrAv+Co/IHwze8m9HSfFWuN0DULBosfysWf4CwlmZdwTTr8YxoohlLbE/0WbUuNbTELzDxxkfb/jcuNV+repI1JM2/+WaXzKwWzZWxCBBO1gR/UtCQp+Qgee6N9Zgztx5IXWlxpQYSIvtmjyncW3ctSEDY+3jbJGKRRr0iJgW+21u8uDo4SrE9+qlripZ1VFDfR12bN8m+w07+9SyiiHB09iIRc88jzhHnOzvG4K/RKSyijur/kbQlteWTRvVxaasziVGxAWv3u3sYmJwytRcVFcdiknbUKnnFKt3NRzYtwc5EybLfrK5yRPyTbs6btDGsNl4Q8usHyha9VJIPf+jX73YDiNhFTdaZmkv+tVy5D/xVNgYS+8uTEEtd/U801BfZ5pXu5qaPMibOSukLat507LTF2nZSWdHxlB2+wuhI/WkLm9u8mBX6Xa5b9iIGbVe33y1yLS9mBG/CCa23VVagoS+fUPWibR+tczGKVbjD3E+1Y7XvZovJY3GlHZMmZobdtcFNBMTORMmo8Zi8lUvLvXKT7Dq12Ih0raltTr4S24ifbUnjss7Q+yUu9H4J1odyQuC6TFqb1Z5ieV1Sa7yy3Vbgu9BUq8HyvfvhdfrDfu8ucmD//rPZ+ENviNPO8koJu7EONToWLFkdX2l9k/a847v/Dn0H5Ac1o/rUc/zVjEVyXV8pPRiSdu3J7n7G54T1HzYjd9LxfbP3NOlkav87DERkZms7BzkTJyM365+WV10xRG3b4sJH+o8PJfQ5TTH5CfU6erH+qVIMF46lzq2ygo+tvhq0eqYTwRR12DrETO6NMRtknrf5hIR6akoL7sqJody82ZgYMqgqO90Ivt4LiEioitBVnYORmfcaPnUAUVHfcwPOo8tEkWKdxBdRskpqch//En0dP7jWWPt4xRERFcr9ZbmWN3SS+F4LqErDe8Y+GZj/ZIVV/Clxf2S3FfkIzREZIwTREREREREREREXRwfMSMiIiIiIiIi6uI4QURERERERERE1MVxgoiIiIiIiIiIqIvjBBERERERERERURfHCSIiIiIiIiIioi7uWwnXuf+3+mFnc7lc+Enhv8PRoztqjx9TF0csOSUVP336F+h2zTUx2R9FLys7Bz9+8qc423oGpxsb1MXfKLGO4ytZV6rXSM2ZOw8z770flQcPoL29XV1MRERERER0VbgkE0RZ2Tl47F+fwLG/HsX5c+fQo0cPjJswGQ2f1UV1Ya3u7/y5c9jxQXFU+zIzZ+483Hp7Hsr27pKfzS8oxLAbRqDyUEXIupeKXpoiYZV+tWwjdbqxATs+eL/TJhE6mv9Y6mgcX006u16vZpWHKrCrtOSKmhzqaDtWXUntjoiIiIiIOgcfMSMiIiIiIiIi6uK6XT8y82v1Q5XL5UL+E0+hsuJjZGSNQb8kNwBgy6aNKCneCgQf88p//En0dMYDAJqbPCj61XKMnTAJM2fNlvuqPXEcb75ahPwnnkLNkWqMyc5BT2c8Av4A3vn966goLws5pjhW7YnjWL1iKXLzZhjur2zfbpmerOwcPPjQI4hzxAEAyvfvxbo31sjthDlz5yF7/ET5t8jT/IJCDLl+mPz8z9u2IvOmf+Rdm15tmrSfJ6ek4of581F5qALjJ96MOEccyvfvRWnJB7KsLvjaULTqJTTU18n1d+3cgWm3TQ8rFzVN2vIX1HIT+/edPxfyuVreAEzL1tPYiJGj03H27Bn8/W9/R5OnMaQ85xcUAgA2rn8LP8yfj83v/hEnj9VEXM+7SkuQkZmFze/+MSRt4hh6+VdjT1umqkjrBDrlImJJpF3EnYi5QxXlIcvVGNaj5kG0H6/XG7Ke2GckZaounzN3HhKv6wcAsjyN2ocgyk3Ui9q+RF1o25MoRwAh2yJYpjkTJss8qmk2Kyu9YzTU11nuw2i7aGJCS5sXAGH1o21H/ZLcIfVhp2+FTj+lrS9xfL8/gIEpKThyuAqjRqfLdY36Tu0+zGLCqN0REREREdE3i61HzMSjNCNGpeNPG97BG78rQnOTB3fNuk++k2TcxMmoPXkCv1n1Eqo+OYhJN09Fe8CPkuKtaG7yYPCQofj1y8uxvfg9ub9Bg4egaNVL2PD2WoxKz8Do9BtRefAAevTogfwnnsKp2pNY8cJz2L9
7JybdMg2OHt1N9yce9cnKzsHsB7+HP657E2/8rghVnxxEQkJf/LXmSEi+XC4XMjLH4Der/gubNv4Bidf1Q1pGJioPHsDOHduReF0/+Nvb8fwzP8Oxo0ewq7QEI0al4fhfa/Dysl/idGMDcvNmYNpt07FqxVJseHstvsJXmJp7ByoPHoCje3eMnzQFvV198PLSJag79Slum34XxozLQdGql/DnrZuROXYc+iT0ReWhCvTq7cL4SVMwZOgwWS5f4Svcdfe9qDlyGNuL3wtJk95jTaMzMvHll19i5f95Aft375T7L9+/Vzf9WrXHjxmWbXyvXnh56RJsL96KnvFOWU7t7e1ITknF5FtuxZ/f3wKfrw1jxo1HzZHDOHumJaJ63vbeZtx+50wkJvZD1ScHw9JXtndXWP7FxMrhqkq5j/4DBuKW3Nt13wkjythunQDA1Nw78PvXf4cNb6/FF19+gZwJk1Bz5DAC/nYZd336JODBhx7Btvc3Y/PGP8gLf70Y1qs3o/ajrmu37fja2vD8Mz/Dtvc2h8TQ+XPnkHHTGKRlZGL3Rx/iN6tewhdffoHJN09DS0tzWJkLvXq7ZL3+7YIP//T9R7Hzwz/L9F7T7RoMSB6IiVNuwa9fXo4Nb69F65kzuODzAYDcVux/6LDhGJiSivJ9e8LiwKyssrJzdI9xTbdupvuYM3ceBg0ZihX/+Sw2bfwD+g8YiLE5E1C2d1dUMaGlzQuAkPrZu6sUt952B6belif7zlHpGRg0eCjK9u6y1bfOmTsPo9MzZB9T9clBzLj7HvnOtaHDhiNr7Dh8FKyPj8v2hbVjmMSxVUxs2rA+rN0REREREdE3T0SPmG17f7O8A6CivAxNp09jVFoGAKCkeKv8Vrmhvg5NHg/c7gEh26t2bN8mv5HfVVoCh8MBZ6/eGDp8BBwOB0pLPgAAeL1elO3bjbT0TGUP+qZMzcXhyk9kWhvq67Bl00Z1NXi9Xrz+yq/lXRpHqitlGuxKS8/E4apKmY8D+/bA7w9g6PARAIBAIIDiLZvg9Xpx8lgNzp49I9f3er04VXtSfnMv1t+4fl3I/nw+H0am/eOOADMV5WV4d/06IJg/df/RKtu3W5bTgeCF8NgJkwAAI9PS4ff7cfJYTcg2gt16RvAOpAsX2jRbmxuZlg6fz4f3/rRBflZa8gEcDoesA1WkdfI/r/y3TP/R6ioEAgEkufvL5S5XH8x+YA62vb9ZtgE1b1YxHGn7sSrTjevfkuvqxVDtiePyeAf27cHZs2eQkJgol9sh0tdQX4eDB/4CAIhzOGTZHDzwF907blSRlpXeMcz24XK5MGjI0JAYLi35AM74eCSnpAJRxIQVUT+iLj+rPyX7o+qqQ3DGx8Plcsn1jfpWkXZtfTfU1+FwVWVI+TQ3eWS7NGIVx7GICSIiIiIiunpFNEGkavm8Wf7f5XKh8OfPYtnKIixbWRTySEKkEhIT4eqTgIJFi+X+Zs6aHXZRpcflcsEZHw+Pp1FdpCs3b4Y8xsOPPoa4uIuPzNghjpU9fqLcx+IlSzEwJUX3wsrr9cLX1mY7bdBsE4n5BYUyPdrHUmJFXDCLC9S09MyQi2+7EhIT4ff74Tsf/Ut03e4B8LW1hRzbd/4c/H6/bh2o7NRJVnYOfrl8FZatLELBosXo2fPiY2DClKm5OFxVGfLYTaQxHKv2o1emIo9mE05a2vj55fJVyMrOCVnu9XpR9KvlGDRkKJatLJKPF1aUl2HjO2/hwYcewbKVRcjNmxGynZFIysroGGb76D8gGU6nEzNnzZbLChYtRp8+fUMmSAQ7MRGJls+bQ/pKO8T6zl694XA40NrSErLc42nULR8zVnFMRERERERdW4cmiMQ36uJxmrJ9u7FwQT4WLshH7Ynj6uq2tba04LP6eixZXCj3t3BBPpY+94zlJEQkF8Pi3R3iOGtfewWBQEBdzZA41pZNG0PSuXBBfsze0SEmoeyaX1CIls+bZTrK9+9VV4mJI9WVSOjbFzeNHQdnfDyOVlepq9jidDpD7thKcvdHnMMRso4Zowtlvz8QdlEdjazsHMOoo7EAAIcmSURBVNx933exasWLWLggHyteXBJ2h9Ou0hKMTs/AnLnz5GeRxHAs209rS4vhXXB2JzxWr1gq0/H0U4+HvQsKwdhf+twzWLggH9C8g6qivAxPP/U4Vry4BNNum25rkiiSsoLBMcz2cbqxAa2tZ7H2tVdClhnl7Uog+lazyU51YtSMnTgmIiIiIqKuLaIJopwJk+WFeG7eDCS53Rcf1VC+5U5OSUWS++LLVqNx8lgNHI443HXP/eoiW6qrDiEzK1ve+ZCckhryclZBvftkVFpGRHcQIXisabdNl4+qdFRcXBymTM2Vf4sysHp8BDp3T4nHUzpDRXkZWs+cwT2zH8Cp2pO2HiVSiUmlqbl3yM+mTM1F3LX2J4iOVlfB6XSGxMpd99wPhyPO8JG3SKh35IxMSw+788LrPYuiVS+FTBJFEsOxbD8nj9XA7/dj9gPfk5+JthrtJJ4qOSUVP3jsX+Tf4m6X3LwZckLId/4cfMH3D4lJDvE4anJKKqbdNl1uH0lZGR3DbB9erxenG+qRN3NW2ETilcKobxV362n7GFF+1VWHlL0YsxPHkcjNm4H/eGF5zPo9IiIiIiK6/CKaIPI0NmLRM89j2coiTL/zbvmunIb6OhyrOYqHH30My1YWIf/xJ+WFGzSTCQWLFss7Dcx4vV68uea3GJ2eIR8J0T5OYrW/kuKt2Pb+ZpmegkWL8bf2v6mrobTkAyS53XL//ZNTQu4g0i4Xx95VWoLMrGz56E1J8VYcrqoMebSl8OfPRn0hGggE0Hb+vNzX6PQMvLnmt3ISSy9Ngjf43hXxKM2iZ56H3x96R5SafpVV2WpVVx3Ct3s6caS6Ul1kS0N9HTauX4fMrGyZ38/qT5ne2aDmv6G+Tk7OiH0MGjJU9xfAoiEm5hYvWYplK4uQM2Gybvq0eSn8+bMAYBrD6rZm7ScS4vEvZ3y8POa026Yb/gJXNHznz6H/gOSQ8n7z1SK0trRg+p13Y1nwUctTtSdRUrwVXq8XxVs2yXrOf/xJfKy5c8eqvWuZHcNsH+veWANfW5usx2WaR+OuBEZ9K4Jp1/YxBYsWY8f2baZ3Kart2G4cG1HbHRERERERffNE9DP32p+Rp9hTf0r8Sper/FR5LIhHYV4tWh2zCQ2iKxX7ViIiIiIiulJEdAcRkSAec4nm5dRGXC4X8mbOQuuZM5wcIiIiIiIiIrqEOEFEEZtfUIiCRYvDfrkrUuovdy1eshS+tjasXrFUXZWIiIiIiIiIOpGtR8yIiIiIiIiIiOibi3cQERERERERERF1cZwgIiIiIiIiIiLq4jhBRERERERERETUxXGCiIiIiIiIiIioi+MEERERERERERFRF8cJIiIiIiIiIiKiLo4TREREREREREREXRwniIiIiIiIiIiIujhOEBERERERERERdXGcICIiIiIiIiIi6uI4QURERERERERE1MVxgoiIiIiIiIiIqIvjBBERERERERERUR
fHCSIiIiIiIiIioi6OE0RERERERERERF0cJ4iIiIiIiIiIiLo4ThAREREREREREXVxnCAiIiIiIiIiIuriOEFERERERERERNTFfSvhOvf/Vj/UM7+gEMNuGIHKQxUAAJfLhZ8U/jvue2AOEq/rJz+n6M2ZOw+33p6Hsr271EXfeLl5M/DgQz9A5cEDaG9vVxeHiaSsIlm3M0WaRwTTPvPe+yPahsxZ1cP8gkKMnzTlssfL5WZVTma02/bo0QM/Kfx3OHp0R+3xY+qqMZWVnYMfP/lTnG09g9ONDerimBPnwUuRNz1WdXS19x/qOOPW2/Ms22ZySip++vQv0O2aayzrxKr8hEtdz5fznNXZMaPWaaRjx0jqV486lv2mM8uv3fi/3LKyc/DET5/G7XfOREtL8yXp268UHW0vZsz6NbNlkTCKv8vZxwlXQhouFW09dHYfH2vJKan4ycKnce6ct8Nt/1KPES+1+QWFePDhH2DEqLQOx3XUdxDddc/98LW1YeGCfKx7Y426+BtnfkEh5sydp34sJaekYvFzLyArO0ddRFexy12v695Yg6XPPQOv16suIpus2q5q9YqlWL1iqfoxXQUqysvwi0X/CxXlZeqimMjKzsHi515AckqquuiSiDSWr/b+Qx1n2GmbDfV1+MW/PYWS4q3qIrKhs2NGrdNIqfV7udtkZ7rS8zZn7jzMLygM+SzSPsqKy+VC3sxZ2Pb+Zjz91OOd1rdfqTraXujyudzXD0Y6u4/vKL1+JVY6e4wYrVjkOTdvBpzx8ViyuNBynGRH1BNEidf1Q8vnzerHRERERB3GccY3D+uUIuHs1RsOhwOtLS3qoi6B7YWI7HC7B8DX1hazib9u14/M/Fr9UM/8gkK0fN6M9/60AflPPIV+SW4AQMAfwDu/fz1kNk6sq53tFjNjq1csRXJKKvIffxI9nfEAgAu+NhStegkN9XVITknFD/PnY/O7f5T7zM2bgZwJk1H0q+UhGXe5XMh/4imU7dstv03SHgfBWbnE6/ph9YqlmDN3HrLHT5Tbb9m0ESXFW+V+ao5UY0x2Dno642W+Th6rscxvVnYOHnzoEcQ54gAAzU0emVb1mOX79xp+CyDSCgBDrh8GKOur5SaOM3bCpLDy0ZYZgJA81J44bji7mJs3AzNnzZZ/G6VXlJmnsREjR6fj7NkzlscySr/X6zWsY0Gbrgu+NtTXnYLD0d1w39p0i3Jt+bxZ1oU2XSIvIs0iHpPc/W3VqzZ+zYg8VlZ8jNy8GYCyTxH7dZ9+ioybslB74jiqqw6F1aNenIp41JZDwB/A/r0fISMzC68WrQ5Ln17c1544jjdfLZLloe5fLSuzWFLbhdrejNqU2g5E+U7NvUOWuRqXevXhO39O9zgJiYnImTAZnsZGZNyUBSj50PYZok4qD1Vg/MSbEeeIC6vvSMpcb3/l+/eitOQDuQ+z/UOJt2jaoZ75BYWyzxH7H5mWblpOaj1q06XX/2j7ab360vb/ZuWt7QuamzzwNDYivlevkPoS5w81lmARO7UnjgNA2PkLOn2jtq2YtUl1O3F8vfanbiuoZW03ltV+VVvP2r5HS403kV5tu4DO+deqfzPbtxrH58+dBQD0DdadyO+otAzdNIhyaW7yYO2a3+LheT+S6VLXMYpTtRy01LxCiRto8pKVnYO77/uubP8izzu2bwtJj3ZfKqu4NYopO9uKdYxiXlsmzl69Lduj3f5PrQdtnfZPToHDEYc+ffrig62bMXnqNMMxIDT9CYCwNmnWz8FgfKqlLVtte9QbW45Oz5DnJrMxhtl+oVM2zU0efHLwAG6bfpfcXuxPXVc9jrZum5s88PsDON1Qr5vfjrRZbV8CAH/ethWZN40Jq9+K8jLDvOuNebR5UccQ2vatnvOcvXpbnicjGesIeu1VfHaq9iTWvbEmrIy0x1b7AyhjDCNqPQf8AWx+dz1unnZ7WN7HTpik2x/o5VubTzVvorwPVZTLaz3RztR+MjdvBqbdNt1y7GvU3tQyMCtDdR9z5s7DoCFDZXrM+nE7faVZuzXq57Xbq+ejsn270drSEha7avoizZc2LWo/rDLrByLZr97YUPR3evnWxpE69jf6XEvtV7Zs2oij1VURnYegc84TrMaI2jzqXW9YxYzabtV0qsubmzz4+9/+jtTBgyHolQt08qi2EbXc9PYRiYjvIPJ6vVj63DOoPXEc5fv36t7yWV11CIOGDIXL5QKCmUro2xe7SktkBg9XVWLhgnwsXJCPw1WV+P68H8n17fJ6vThVexJp6ZlA8DhJbjeS3G4kB2/JTbyuH6qrDsHlcqF79x5YsrgQCxfko3z/XuRMmBxyzDHZOSha9RIWLsjHZ/WnkDdzFgBY5reivAyrVrwI79lWrH3tFXnrnhhArHhxCRYuyMeKF5dgdHqGPBHrGXL9MFRXHcLCBfnYsmkjMrOy5e2JI9PSsWP7Nrkvp9OJsRMm4Wh1FRwOB4YOHwEEAzBnwmTZsYsT2cIF+ViyuBDO+HjDNKQOGiLTu2XTRoxOz5Blqcc9YABefPbfsfS5ZwCLYxml34o4EYl0bVy/DkOvv0Eu1w7Axb6HjxgZclunaDjadM0J3gY9dPgI1Byplst8Ph+m5t5hWq+DhgyVsXS4qhKzH/iePJaZfkluDB1+g4x9X1sbvv/DfLk8Li4O8b16YeGCfMPBg16culyusHJ48dl/x4hRaYiLu9gZGxH7W/HiEiS53Vj0zPMo3rJJ7n/K1FxAGRTp1a+WS3NbuKiTM59/LvchbpleuCAf297fjNkPzAmJM207aPJ4ULBoMRCsPzUujerDrK/ql+RGe/vfsXBBPta+9goGpgzSzQeCdTJiVBpefPbfZXyI+o6mzLX7W/vaK8jMykb+40+iaNVLIfEHzf6t+stI2qFKXPiI/e/b9ZFcZlZORu3GilF9CWblrfYFxVs2YeTodM3ew5n1qWpaqqsOhZxktUqKt2Lta6/Ae7YVK15cEtI+jdokbPSpZtsKsYjlXM3txwsX5GPnjj+rqwAAZj/wPRlvSxYX4tPaE+oqhsz6N7Wt6PXTIo7/8z8W4z//Y7FufgW1P1q4IB81R6pD1kEH4tSM2bn95LEa+P1+jEy7GJcj09LR0xkvxynOXr0BAEerq0L2qTKLW6uYMts2kpiHRXtU69Ss/zOL4aQkN4q3bMLTTz2OI4fNy0XLrE1GQ+1ftr2/WbbHN18tkv1ockoqho8YiY3r18mBv9kYw2y/RnG8+8OSsLyp66p9u1q3Zft2Y2BKiiaH4aJts6tXLEX5/r2oPXEcCxfkY+ufNurWr1neYTHm0RuDtZ0/ByjnPDE5ZHWetDvW0VKvMxDsUxwOB0pLPrB9jo6UXnupCr7HR5t3r9dr2R/YOceIi/dt728Ou7BWrzEAIC09E4erKg0nKLSyx0/EspVFIf+0ky5WZai9pnS5XBg0ZCj69Okr0+N2D8Cp2pNhk0Ow2VfCoN2a9fNW9GJXTV8k+VLbtjpu0lLXNesHsrJzMHzESJnHDe+8JZeZjQ2NuEzG/nqfq9R+RUxyR
HIe0vZTdkRyvSHWF8vVmDEbb6j998JgX//6b1fr5lnLqo0YlVtHRDxBZMeBfXsAQF78j0xLh9/vx8ljNRiZlg6fz4f3/rRBrl9a8kFY52PXkepKxDm6w+VyIcndH00eD1pbzyLJ3R/JKalwxsfjaHUVvF4vXn/l17KBHqmuhMPhkIM1ANixfZvs7HaVloQtj4Ro7Np9NtTX4XBVZciJRlV74ris2AP79uDs2TNISEwEgoMhsayhvg5NHg/c7gFoqK9D65kzGJWWAWhOXkerq0JOZAiedMr27TZMw/+88t8yvUerqxAIBJDk7q+uJpXt2y3L1OpYRum3op6IKsrLcKiiXC4XMSXirqG+DsdqjsryQLBcxUlPpEt0zBXlZXh3/Tq57FTtSTmjrBL1qs13ackHcMbHh3QgRpqbPHjz1SL5967SEiT07Su3DQQC2FVaotkinFGcquXg9XpRvGUTAoGAsodQYn+iTj6rPyUH7tVVh+CMj4fL5bKsXz2ifhvq63DwwF/kPjau/8dJ6MC+PfD5fPKCCko7qK46hAu+NnlcbVxGWx/NTR7ZB1WUl+Gz+lOGsRgIBFC8ZRO8Xq/MsyiTaMpcu7+Tx2pw9uwZGd9q/NntLyNph1pZ2TlI6Ns3pD62bNoo48usnCJpN4Kd+jIrb6u+QI9Rn6qXlpLiragN3lERCaM2CRt9qtm2dpjVkcrpdMp979n5ocy3StSj1+vFnp0fqosNmfVvalvR66e1dWFFjC+0bePd9evkBaQQTZyasTq3i2OIOnC7B6DyYIUcp4xMS4evrU1ua8QobmEjpoy2jSbmzdqjWqdeG/2fHu0553JS+5cD+/bA7w9g6PARMu85EybjwYcfxbGaoyFpNhtjmO3XKI712oFZ3x5N3SIGbdaKWd5hc8yjR5tPu+dJu2MdVWnJB0jo21decI5Ky0DrmTNoqK+zfexYUvtJq/7A6hzjcvXB7AfmYNv7m3UvLEW9iwm05OCX/keqK9VVdZXv3ysvasW/8v175XKrMhST6c5eveHs1Ru+tjY0NXlkn9Y/OcUwLVZlY9RuB6akmvbzsWA3X3ptWx03CXrrWvUDcQ6HLJODB/6ChuCdb2ZjQyvq2N/qcyuRnIci7afsXm9o19eLGZfF9WQkfb3Kqo10hk6ZIBKFIhpRWnqmDFa9Z+R858/B7/fLwU8kmjynER/vxNDhIzAqLQPVVYdwuqEeo9IywgZiuXkz5Oz1w48+pvstV6wYPTft8TQanoSsuFwuFP78WZkH7bd+u0pL0D85BS6XC6PSMnCs5iga6usudjR9ElCwaLHcbuas2YZpyMrOwS+Xr8KylUUoWLQYPXtevJXNDqtjmaXfiMvlgjM+Hh5Po7pIcrsHoF+SG4uXLA35dsLsIqC1pQV+/z8GsfMLCnW/2VA5e/WG0+nEzFmz5foFixajT5++phNpRpo8p+H3+9WPo6LXtiLV8nmz4fPuVvWr5fV6UfSr5Rg0ZCiWrSyS30QkJCbC7/fDp7mI83q98LW1GV7Utra0oLX1bMg2QqzrI1IdLXORd6P41tu/VX8ZST3p1Uck7LYboSP1Fd+rt2VfEAmjPjrWOtKnxlJJ8Vbs2L4NBYsW45fLVxl+uya+xV+2sgiFP382LGYioe3foumnzei1DSORxqkZo7jRnts9nkYMGjIUySmpSLyun5xkGzp8BNLSM1FddQgIftMr0rVsZZGtb6bRgZgySnu0IqmDK50Ya2jvdli8ZCkGpqSEfEnna2uDwxEXMlDXI8YYot8y2m8kZWjWt/cfkByTuo1lm7VTprGgV4ZW50mzsY6qQfMlrCt44S4mtaI5dqxF2x8IU6bm4nBVpe7kkHCkujJk4rD1zJmYTepalaEYn4xMS8fItHS0fN6Mgx+XIS09U14cnzxWI7fVirRsZLuNj9dtTx25hlPZzVck46ZI+/iK8jJsfOctPPjQIyHnoGjHhkZjf6PPY6Gj/ZSW2fWGEbvXk3pxbpfetp3dz3TKBBE0nclNY8fBGbyLByaNy+8P2A5oLdFx3zRmHBKv64ej1VU4Ul2J/skpGDRoqDwB5AafuRS33K197ZWIv+WKhFnFqZVsh0vzjKeYgdfOCIvOMS0zK2Q2vbWlBZ/V18t8i396tztmBZ+VXrXiRSwM3qZ34UJbyDpmzI4FzXsD9NJvxGjyQNvwPZ5G1AZvq9P+U29X1kpITIQj+ByseA5YbKf9ZkPlO38Ora1nsfa1V0KOpfcIhB1J7v5wOBzqx1FT21ZCYmLMJkLN6leNJQTrbulzz2Dhgou3q88vKERrS0vYt1dCNBf+sa6PaHRmmUfTX0ZST2b1YSWSdiPEor7M+oJIxcXFhfTR4oImVjrap8ZaSfFWLFyQj3d+/zpmP/g900mihQvycar2JPKfeCos/uzS9m/R9NNmjNqGKpo4NWPn3C4eyRg/aQr8/nbUHK3G6YZ63DRmHOIc3WXbXffGmpCyMLtAEzoaU7GOebUOYtn/XUpirLFl08awGBX1kpWdA2fwwtHq8Xgxxmg7f850v3bjGBZ9++nGhrC4jKZuY9lm7ZRpLBiVodl5MlLicaC0zIvvehNj7ktxbDMd7Q8Q/IJ5dHqGfExGj/bRWe0kdyxYlaFXc9PB8BtG4kh1JY5WV8EZH48h1w9DwN8eNrZBlGUj221bW1h7EqK5htNjN1+RjJv0zk9W/UBFeRmefupxrHhxCabdNh25eTM6NDbUG/ubfd5RHe2nOsru9aRRnNthtG1n9jOdNkFUUV6G1jNncM/sB3Cq9mTILX5OpxN33XO/XPeue+6HwxGHk8dqZHCLW8OSU1Ix7bbpcl091VWHcP0NN8Dvb0dDfZ3suAcNHiInStTZt1FpGZ06iBENf9pt0+UtgCIv0XSs6qxwcvB9S4LX68XphnpMmDwV0Jy8Th6rgcMRF1LeRtQZ45Fp6Zaz7Vpmx7JKv5mWz5tDngHNys7BwJRBcvnR6iokud2m374OTBkkL4ZEPZQF39Hk1NyV4ArenmlElLPeM9wIdg5mJ9l+SW45sHQFn8kVd3t11JHqSvTp0zdk/zkTJqurRc2sflXJKan4wWP/Iv8WE7VikKF9djo3bwaS3G45iRwJq/robJ1d5lb9pZ5I6kmvPmbOmh1227JKDDjsthuhI/XVdv4cTtWeNO0LItEQfMxA+y66sRMmyZcHxkJH+9RYuu+BObIPbPKcRkDnzkWXy4VHHvuxLA/tpK3H0xjyfr+77rk/rKzM+jc7/XQk9NrGfQ/MQXzIoxP24zTL5k+K2zm3iy+ubswcg2N/PQoE+4pBg4cAJt9229GRmIp1zHdG/xfNGFArKzsH//HifxlOfpqprjoUUq9aySmpuPu+76J4yyZsfvePmHLLtJD1jMYYXq/XdL9GcazXP5r17WIyJtK67ew2a5b3WNErQ6vzZKTEo3Ez7r4XBz8uk9cSVsdu8pxGXFwcRgYfoc/KzkFmVrZcNzdvRofu1OxIfyB4vWdRtOol00kib/BRmmm35yHO0V0+ztOR9iZYlSGCfU3fvolwxvfGyWM1wT62
BWPGjTe8prJTNkbt9rP6Ost+3s450YqdfEUyboq0H8jNmyHbt+/8Ofh8PsBibGiW72SDsb/R57EQi34qEkYxA4vrSb04N+rrVXrbqm0k1jptggjBE8O3ezpDng1tqK+THZG4BWuQ5q3t3uAz7JlZ2Vi2sgj5jz+Jjy2+WT5aXQV8/bUciHm9XgT87Wj3t8uCKy35AElutzxm/+SUiO4g2lVagsysbMNb88Uzjw8/+pjs7Ne9sQaHqyrl7cAFixZjR/CXTCKl3b8oF9GQhSPVlUhKcoecvLxeL95c89uQ8tbeRqglOnxxm17OhMmWs+1aZseyk34jajnmzZyFo5oXWTbU12Hj+nWYfufd8phqPTU1eZA3c5asB3E7rTjpiVs3Fz3zfMitgkb16mtrC7mdcX5BIVwuF+Ic3UPiXdXc5MFNY3KwLHirta+tLeyFgNGqKC/Dtvc3h+Sl5kh1RHFuxqx+Vb7z59B/QLJcZ9CQoXjz1SJ4g7eZOuPj5bJpNn4Jw4xRfQhWbbcjOrvMrfpLPZHUk159ZGSNkQMqI1btxoxVfZlZ98Ya+RLBZTp9QaTEuzdEWtLSM03vbBRffBQsWmwrzR3tU1UdiWWv96zsfwsWLcaunTvCvn30er2I79VLpnfabdPx5prfwuv1oqR4a0jZd+/eA81NnpDtzfo3O/10JPTaxsCUQSHvIIokThMSE+V7Rayo5yS9c3t11SFcG3etnPhu8pzGtXHX4nRDvWHbtaOjMRVpzJvpjP4v0jFgpG3STEnx1pB6XRZ8zHJgSiq+P+9H8rGaivIyHKs5ivzHn5QXSUZjDLP9ulwuwzj2er1hebPq21evuNjmIqnbjrRZ7bhapEHto8zyHit6ZWh1noyUN3iRjq+/Dvkyy+rYDfV12LF9m2wj6jnLbfKCZTs62h8Ioq4zs7JR+PNnQybaBXGt1dE+TGVVhghOWLT720OOff7cubD60LJTNmbt1qqftzon6l0/qOzmK5JxUyT9QGtLi2zfi5csxanak/LayGhsaJZvo7G/0ed69PoVM1b9VKwZxYzVeEMvzkVfb5VnvW3VNhJrtn/mPhq5Jj89SPRNk5Wdg5yJk/Hb1S+riy4btsFLj2V+aYlbemMx0erS+Uljsudqj/sfzf8JyvbuDps0+6aLdcxf7XEQjTk2fracrn6xrmeXy4Xv/zAfG9e/ZWti+nJLVn4inKiriXUfcCXrtDuI1Ftsib7pKsrLrqjJIdEGO/LtFEWGZX5p5ebNwMCUQaZ37UXirnvuh9PpNPxGkr65frv65S550RPLmGf/R99UWdk5GJ1xY1S/uGbE6/Vi9YqlV8XkEADMfuB7MX05NRFduTrlDqL5BYUYcv0wlO/fG5NvdYnIWnJKKvIffxI9nf94xpptsHOxzC+t3LwZmDlrtvw74A/gnd+/HvWAdc7ceSG/MnHB19ahxx27sq5458jVKJYxz/7voq70rXJXI+6w65fkxpZNG2Nyl93VRpx3m5s87N+pS+tKfX2nTBAREREREREREdHVo9MeMSMiIiIiIiIioqsDJ4iIiIiIiIiIiLo4ThAREREREREREXVxnCAiIiIiIiIiIuriOEFERERERERERNTFfSvhOvf/Vj/8pktOScVPn/4Ful1zDWqPH1MXX/FcLhd+UvjvcPTo3uH0R1oWySmp+MnCp3HunBenGxvUxZdcLNMjyuI7934XeTNnYcSoNJTt3aWuJqnr582chcTr+qHyUIW6akRiWb9Xgzlz5+HW2/NQtncX5sydh5n33o/KgwfQ3t6uroqs7Bz8+Mmf4mzrmajrO5YxEym99pabNwPzn1yIm2/NxTXdrsGj+f/aofzpsSrXy2l+QSEefPgHlu0NndA2xLFF+829YwZaWppjWvZa8wsKMX7SFMt8GtG2Fa3cvBl48KEfoPLgAfTo0SNmZRSL9na5aONq2A0jZLl1JE9z5s7DIz/6sWmsZGXn4ImfPo0Zs+6VcaU9P5w503LZ+p/LJZZ9rlkfoNe/Rmp+QSGG3TCiw+dxlYiL2++cqRs3sdaROLfLqD/6JtHmUdvPXmnnUa1YtINoWcWd1XI7Iu1PrNaPJE2d3f9cyWJZjiqj87Udok7ue2COvAaLZFyp1RX6NLsuyR1Ec+bOw/yCQvXjSyYrOweLn3sBySmpAICG+jr84t+eQknxVnXVK5Ka/li62soiltS4HDz0emxcvw4LF+RjxYtLkOR2IzdvRsg2QlZ2Dh4vWITDVZVYuCAfCxfkY8niQgwaMhSFP38WLpdL3cRQZ9bv1WbdG2uw9Lln4PV6AZ06qigvwy8W/S9UlJdptrp6qO0tOSUVU26ZhrWvvYJf/NtT2L5ta0zyN7+gEHPmzpN/q+V6pcjNmwFnfDyWLC7E6hVL1cWXpG2U798r2/ChinLMfmBOpx1v9Yqluvm8Ul2t7c0sruzmKTklFYufewFZ2TlAcBD697//LSRW8mbOCuvrK8rL8PRTj2Phgnxs2bQRzU0eLFlciIUL8rHujTUh635Tqf32paL2r1cKl8uFvJmzsO39zXj6qcd1Y0/tszvKbpzbpbaHjuqsvj3W6bwaXc52oMad2heoyzuDekwrsUrT5Sz3SNlpJ5eqHM3O13bcdc/98LW1yXNsR/fX2WLd13eWSzJBRHQ12LPzQ9mxNdTXocnjgds9QF1NDvYOVZSHDPi9Xi+KfrUcADB2wiTNFkT6ktz9AQBNntPqoi7B7R4AX1vbFTNxVVryAQKBgKwXujp1Rlx5vV68u36d/PtIdSUcDgecvXqHrEekcvbqDYfDgdaWFnUREVGX1tHzdeJ1/dDyebP8u6P7o4u6XT8y82v1Qz25eTMwc9Zs+Xf5/r1Y98YaJKek4of587H53T/Ki+vcvBnImTAZRb9aju//MB9Drh8mt9uyaSNKirciOSUV+Y8/iZ7OeADABV8bila9hIb6OrhcLuQ/8RRqjlRjTHYOejrjUXviON58tQj5TzyFfkluBPwBvPP710OOqZc+9XPtfsr27ZYzvXPmzkP2+Ilh20OTH09jIzJuypL7MZuZjNX+zNKvLR+1PEQZ9ktyy+309i/W05aF3jHFtqK+d+3cgWm3Tdc9tpZYv/JQBcZPvBlxjjiU79+L0pIPZP1r6x46ZSdiRqTV09iIkaPTcfbsGfzf9W/hgYcfkfEn0m4UZ6Ie5hcU6salkJs3A9Numx6SLiErOwd33/ddvFq0OmwZgulPvK4fVq9YKuva7w9gYEqK7nHUsrZTv0b50qPGwq7SEmRkZoW0WW2Zq/Whl8ZI4kE9vrpcr7yM+o6j1VVh/Y22LkXafefPhRxTmye9PkvQi7GiXy3HXffcbxqTdvsqbXsDEFauG9e/FZa2SPKnfi6OnZCYKMvV6/WGxY9V/6tXZ0bxomV2HDttUC2fWLeN+QWFaPm8WS5XY8NqeVZ2Dh586BHEOeIATR6MPtfGutouzMpR0G6vpW03AEL6dJGWQxXlKC35AD/Mn4+6Tz9Fxk1Zsi0b9blqfsXxAci6U8vXbmxAp46N+u1I4lMvrtzuAbLc1Dx
Bp419+OdtuGPG3bL+mps8su1A00+cqj1pGFtQ6kVsq3dOVMvJLP9Q0qtNm7qdWjdaatmLdY36QATjympMoVf+ot+OJM9GaVfHLNr4fu9PG0KWRRqvzU0e+P0BnG6ol+NbO2mCzrhFrKv2BUaxpPbZ2vGMoC1zEVuVFR8jN3insxoLapyr+xPtzSieBKM83HXP/ablqx7PbFyuF0tqmarnCLvt9vs/zDftx9X0aPNgNj7R7hPB9CB4p6iW2E4dB2qPa3UeqzxYgdTBg/Fq0WoA0K1bo3PAnLnz0D85BQ5HHPr06SuP09G+WrRr7flEO4aZMjXXsC9Q067uW6Rbr/7V+tOKpv/R7u/ksRrT80tn9j+1J44DQFhc6RH1bXYdqZaf3T4JMShHs/GRlt5xtOdrvestUa7acTaCbejs2TPyb7E/9Zhq29LmX+y75fPmkHrRlqsar9rlkabX6Nr5SmD7HURTc+/A71//HTa8vRZffPkFciZMQs2RwwCAMePGo+bIYfnM4dBhwzEwJRXl+/Zg547tSLyuH/zt7Xj+mZ+h9vgxWTmHqyqx4oXnsO29zeg/YCBuyb0dlQcPAADGTZiMQYOHoGjVS9i7qxS33nYHpt6Whz9teAdv/K4Io9IzMGjwUPmcoFH6Pvn4AJqbPBg8ZCh+/fJybC9+Dz169MC4CZPR8Fkdao8fw5y58zA6PQOrVizFhrfXouqTg5hx9z3yOdKhw4Yja+w41J2qxYoXnkNzkweTptyKr/CV7nOmsdxf7fFjhukX5bPh7bUYlZ6B0ek3yndQiMHriheew/7dOzHplmm6z8yqZZGbNwPT77wb695Ygzd+VyS3TR08BJWHKtCrtwvjJ03BkKHD5LG/wle46+57UXPkMM6fOxeyf7F+b1cfvLx0CepOfYrbpt+FMeNyULTqJfx562Zkjh2HPgl9UXmoAi6XCxmZY/CbVf+FTRv/gMTr+iEtIzMkLuJ79cLLS5dge/FWdO/xbRl/o2/MlGnf+1GpjLMd27fhN6teQtUnB3H7nXfh3DkvNm1YHxaXQq7J5BAAjL4xE/0HJGPPzh26z6H3698fQ64fhsqDBzAgJRVZY8fhow//jN+seims/KOp38R+SYb5Up/71V7IiLZ2+50zkZjYD1WfHMTpxgbMmTsPg4YMxYr/fBabNv4B/QcMxNicCfKZ+47EwzXduiH/iafga2vD88/8DNve2xwWLxk3jUHPnk6U7d1l2Xf06u0K6W/EgEzsu+e3nTh9ugEpgwbjyy+/xMr/8wL2794ZEmPqPrRE2WtjrEePHpYxabev0ra3kuKtYXWvpi3S/JXv34tdpSUYMSoNx/9ag5eX/RKnGxtCylXEj93+V42/0RmZmDjlFvz65eXY8PZatJ45gws+X1jbt+rn9epXq7PbBgCMnzQFf7twQb5v5Af//C/4+9/+hk0b1usu19bP3y748E/ffxQ7g2276pODuKbbNfi7weeexoaQWB+dkalbh2bvPtFur6WtXwTrr+GzOvTpk4AHH3oE297fjM0b/yDbayAQwPPP/Axle3eZ9rmO7t1D4jHjpjFIy8jE7o8+xG9WvYQvvvwCk2+eJt+pYtaXqEQ/K86TzU0edOvWDV999ZVp3Ij8GcWAXlxpy81OG6v85CD2792FtIwb8cd1a/HHdWtlX293cghKvYjt1XNi6fZtyBw7DsNuGImyvbss283kWy9edIky/urrr9DsOR1x7BuNmQL+dt0+0O6YomzvrrDyt5tnO2nX9qFqfKvjmUjj1dGjO7JzJqDxs3pUHqrAD/75X3Dq04vjtP27d8LRvTvqT30akh5YjPn2flSKw9Wf6MYSALS3t+v22VbnXjGO7Natm4zdrOwcZI4ZpxvnRu3thlGjdeNJm8bTjQ26ebAqX6MY0xuXq8z6pfb29ojarVk/bpZOs/HJtQ5HSHqSU1Ix+ZZb8ef3t4TFrKgr7ThQrY+v8BWm5t6hex7bv3snbr0jDz169ED5/r2AxXUXNOcA0Q6uH3YD/rThHax97RXd2I+mr/aebQ07n2jLVm+crZa90b7Pe88a1r96XtKKpv/RpunsmRbT8wtMzq8d7X/ie/dCzsTJsv8xY3Ud2ZE+CTEoR6PxkcdGfWnbXP/kgRgxKg0fl5fJMaZYvnPH9rC+c1dpSdj+VOMmTkbtyRMyXZNunor2gD+kzho/qw8514l+16pfjjS9avxeSWw/YvY/r/y3vFg+Wl3VodvwR6alw+fz4b0/bZCflZZ8AIfDgaHDR8jPdmzfhob6OjQEH/f5rP6UnGmrrjoEZ3w8XMHn/6NNn8vlwqAhQ+WxEHy86HBVJdLSM+V6zU0emd6K8jJ8Vn/K8PGjWO7PjPYYu0pL5O3uQ4ePgMPhQGnJB0Dw1viyfbtDjm8kLT0ThyrKZTmLbQcNGSrLOhAIYOP6dfLYB/btgc/nw8i09JB9CYFAAMVbNsHr9eLksRqcPXsGh6sq0VBfB6/Xi1O1J5EYnGX3er14/ZVfy5lsvdv4y/btDrt1cMj1wzDt9ryQ2VgRZweCJ8yG+jocqzmKUWkZIdtqJQffCaPNX0c1N3lkGiJhVL+R5EuNBQDYuP4tXLjQBmjiVVumpSUfwBkfj+SU1A7Hgzj+xvVvyeNbxYtdWdk5SOjbN2TfWzZtREN9HSrKy+TjIGqM2aEtDzsxGUlfZVdn5S/S/hdK/AFAnMMh+9eDB/6i21bsHicaRmmLpG0I2eMnYtnKIixbWYTqqkNh3/5aEf12Q30dDh74i+XnQkfq0A6Xqw9mPzAH297fHPINWiAQwK7SEvm3nfjWqj1xXO7vwL49OHv2DBISEy37Ei2Xy4WcCZND6rGivAx7dn5oO26MYiASZm3MiHjfgdXkkBntOVH0qaKPsJN/p9Mp87pn54fwer0Rx77VmElbj+p5RKTZzphCsJNnu2mHSXyrIonXkuKt8lt8QTs22bPzw5BliGDMFyk7597mJg/efLVIbrOrtAQJfftG1N5gEE92GZUvbMSYGbN+KZp2ayaadIpYFa8TGJmWDr/fj5PHapQ1L1LHgWnpmXIcjOD+/P4Ahg4fEdYevF4virdsQiAQkNtHSjsm0Yv9aPpq6JxPImG2b7P6j4ZZ/6PH6vxyqfofM0bXkZ3VJyGKcrQaB10OJcVbZZ006LxOpPbEcXl+V/tdO/3yN4XtCaKs7Bz8cvkqLFtZhIJFi9Gz58Vbs6Kh93yg7/w5+P1+eWJRtXzeHPKMoSra9Bk9G+7xNJoGvZFY7y8aCYmJcPVJQMGixfLCZ+as2ZbHd7lccMbHw+NpDPm8taUlrHPU8nq98LVdnHCwItZVj6GVmzdDpvvhRx9DXNzF2xONxMXFYcrUXOz4c7FstAjGWb8kNxYvWSr3lz1+ouVFmN/vN30njFV56MV3LEWSr4TERPj9fvjOh97dITh79YbT6cTMWbPlvgoWLUafPn0xZOj1HY4HveOL5ZFOiKr09q01v6AwpHw6IpKYtOqr7Oqs/OnFp1X/q1VRXoaN77yFBx96BMtWFiHX4EXuHT1ONCJpG0J58C
XVWzZtxLTbpocNkI14g+8cGzRkKJatLPr/27v3+KrqO9//b6zZKDuwgUg2xAQBkUsSYzBGFKaWWInIEVuchxzUUkt/ziPTQRwZC8xYSmeUdiocD7bgmcnUKTreqJxKT/GBNFiDMCiYxkaahEu5lcTATgxhQzaWpMrvD/Zarr2y1r4lAXS/no8HjwfZa6/L97rW+qzvd23zibbb507cytB6Pou1DYM9ryffUqLddbVRL14NidRvN9H6EvvNltt5Uue53sRqY256on27iZX+yopN2vLWZi1YvEQ/emq1CsMvGU207idyzZTsNUW8Ej12JVi/nUSrgwYjWLxiVbnrj0+4bac713zJXos1B46po6PD/rHrMSp8s+RUn3pCInXMiVu/lGy7dZPMcQbDQX3jhjs3ryDiZj8ao3ytDyeWLFuuK7OzNSgjw7EP6Ek91Vd3V6xtu5X/xeB89D/Jctt2d/qkRCVyHXS++Xw+Lfr+42bdsk5xc9LW2qqOjk6l9x+QVL/8eRVXgMiYU7d65ZPmLzwZIxCS4VZJOzo6u1ToeHTn+KJdeCbTQff09pLR1tqqDxsbzV9OMf7F+hWjaDfu7e0h15OxcbLrCSXhebXGsb/03LMxn5oYTzCm3n5nxM1qIHBUhw8eiMiDhfPLoo4QaGps0PInlkZ9EnVo/z51dHQ4joDxhSP39XW77It6TKLpsj4dVPjFyGkejxSur21tJ/TSc89GbOuxRx/SO9ve7nZ9iNZx2jvZREXb9rzwu2OM9BjDspORTJ3sCb2Vvp7of2vCv9S08sllmnLrVMcgUU/sJ1GJtg2ryopNag4ENPOee+2LXAWDQS1/YqkWzi+TLO+gcPvcKloZGvlrP363PHVqo9u3Vmp8Xr5mx/i1jJ6q39H6Emvg3viu23nSLY29UW+itTE3a19Y063RQ7HEk/7Kik1aOL9Mr778vGbOuleFRcUJ1f1Er5mSvaaIVyLHboi3frtxqoNO1zLPrFyuhfPLdOTwIZU9/GiXcnHajrksyWu+ZK/FMv1D5Qmf262iHaNc6lN3JVrH7KL1S8m0WzfdOc499bUaNHiwrrv+BnnT07W3vs7+FUdG+W7csL5Lna+s2OTYBwzKyOixAElP9dXdFW3b0cr/YtDd/kfhh9ux+p9kRMvXZPukZMRzHXS++SzvkDLqfaxRW4MyMuTxpKn91Mmk+uXPq7gCRPZo/djcPDPCblREYyhwVnaOptw6NWJ9u731dfJ6vbrjrrvNz+646255PGmuwzOjiXZ8sRhPAaxPjo00JHOT39PbS8ah/fvk8aRF5G+86ut2qaCwyLxA8IV/setYU6PZqRgjdgzGfpKZRmVnf3IyLjc/rpPi4YMH9OrLz0cEifbW10X9qXonWdk5+pcfPxV1nWB4uK89IGV0PKH29qSfKsQjkXQZFyy3lNxmfjb5lhKlXXruIjIYDOpYU6PjzzWrB+qDEUyz3nSXlE5Tpt8f98WUG6dtT58xU2PGjY+I8htBu2QlWye7q7fS193+t6R0mln3QqdOKhQK2b8i9cB+kpFI23Cyft0rEeu3ftQSMXR45j33mueWrOwcffPBvzXXNUaVuH1uZVwIJlqGTnlaWFSs8fnXdhnmHwyeUPnqp2NexPZU/Y7Vl1g5nScLi4p185e/4pjG3qo3bm0s2iiy2XPmuo4m6Qmx0v/1e2ab/XFz4Jg6wyNGEqn7iV4zdeeaIh6JHLsh3vrtxrhJL544ySzL6yfebL5A1Ofz6YEHv2Muc3ug4VSXe+KaL55z75BMvznFyVi+f9/eLg+4nI7RaG9u9am7Eq1jdtH6pUTbbbR+vDvHWVNdpbbjx3XXzHt05PChLvkeTX3dLtcRq22trRo4cHBE2RZPnGQuT+a+y6qn+uruirbtaOV/Mehu/2NMbXLrfxQOqCSzbad87Yk+KRHxXAfFozlwTGlpaeYD+cKiYhUUFtm/Fjf76Kqs7Bxl+j/Lc0m6Mnu42Sca+WaMDozVL/f08V5IcQWIjBt/Y/hv8cRJZoQ9GL5ZLigs0opV5Sp76BH93haB3lr5pjL9fq0IT0doamwwG5YxxGv4iJFd3qIer2jHJ0snvmDxEscI5toX1mh3Xa05fHrB4iXa8tbmpG/ye3p7sY7fLhgM6sU1P4vIXyPvY6ms2KTNv3ld93/rQa0ID3s9YnsZZ2dnp9pPnTK3Oz4vXy+u+VlSZWdnrSsrVpVraFZ23E8NaqqrtPk3r2v6jJmat2CRmhobtH7dWk29/U5ze9Yh1PZ6mYia6iqtXvmkptw61dy2kVfRnno6SbR8Y6XL6btG+1yxqlwfNh6JaB9rX1ijUHt7xPB+4zi6Wx+C4WGm3vR0c/mUKC8At4tWRk7bzi+coMDRJlXtfMccPr146Q/V0RFfHXLSnTrZHd1J3/atlSooLHKsF93tf9taW826Z9QHp76tu/tRL7cNJ02NDdry1mZNvf1OFRYVm/P7jbZhbTuhUyc1dFhWRNpe/Hm56+dWwfC89Whl6MQpT2fd94DWv/pKlye/xveN9r/o+4+rv8PTyZ6s39H6Ejv7eXLWfQ/o49OnHdOYaL2Jl1sbC506qabwe3Du/9aDvRoQsouV/mDwhNkfL1i8RNu3bVFNdVVCdT/WNZNdotcU0fptJ4kcu5W9fqcnOKLkmZXLI+prbl6B+TQ5GAwqvX9/c9mUW6e6XufY63Iy13z2Pjuec29Lc0DXTSg2l4eivBvLfoxGe3OrT3aJtodYdSxW3x6tX0q03Ubrx2MdZyz1dbt0eT+v9tTX2hdFVVmxKaI8VlimMdZUV5kPPFeEzw/79tRHpD/WfVcsPdFXxyNWX+C27WjlH0usffaU7vY/xnWBU//j8/mU5umbcL0y2PPV3ic5tRO77uRjPNdB8TCuyYxrpdLpM7R3d/IPl63pNtqO/SFnc3NApdNnmPlmnUoYq1+O53jtff3FKu6fuccXU1aUn4vEF1Ohw88wJov6AwDA+VVi+Vlzp6AVzo/zVQ49ed2Gi5PPMv2prbVVxTdN0s+e+Yn9a8B5EdcIInxxxfrlBXyxGMMh244f5yIDAAAgCfbpJ0B33HHX3fJ6vdpbX6ea6iqCQ7igCBClqJLwrwNMvf1O8ycL8cXjs72t3xiGnug0OAAAAJx7P4x9+gmQiNlz5prX5ivCr2eI9/ULQG9jihkAAAAAAECKYwQRAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAivvSoCv8/2z/0M28BYs06/5vasy4XFXt2G5fbMrKztHfL3xMJ08GdcmXvqTvPvYD9bnkEh0+sN/+1W6ZPWeupn/tbtV+8L7OnDljX/yFUFhUrAf/7mHt/+NenTp50r642+YtWKQbb54ctTwvViWl0zTvkYUqnT7D/PeXT/4SVz2bPWeuvvLVUlXt2B5RXz8+HdJ3HlmkP//5Yx072mRfzVFWdk6v1XE4s5ZforKyc/Sdv/+uDh3c3yttKln2tlhYVKyHv/uYvnr7dLW2tsRdH/H5V1hUrO888l2daDvuWO4+n
09/v+h78lzW93Pd58RKxxetb+1Ov9XbUuF6CgAAxBb3CKKS0mnypqdr2ZJFemblcvtiV02NDfrBPz6qyopN9kXdtvaFNVr+xFIFg0H7Ikez58zVvAWL7B+fNxd6/06eWbk8ofK82LQ0B7RsySItnF+mjRvWa8qtU5WVnWP/Wq/qzTr+RVNYVKwlT/z4vJbRxdjunFjbos/nU+n0Gdr8m9f12KMPqaa6yv51fIHVVFfpB4v/wSz3C9FuekOi6fg8961Z2Tla8sSPVVhUbF90UZi3YJFmz5lr/p3o9RQAAPhiijtA5PcPU6i9nYsHXLT21teps7NTmf6h9kXA54q3/wB5PB61tbbaFwEAAABAr+gzamzBWfuHdvMWLNKIUVebf2/csF6VFZtUUjpN02fMlCR1dnTq1ZefV011lbKyc/Ttsnl6/Ve/1KH9+1T28KOq2vmOuU7xxEkKHD2q/OsKJUmHDx6IGMXitl07Y1vlP31KklT28KPat6deE4qK1c+bHrFuomnw+Xwqe/hRBY4e1djxeTpx4rj+37pXdM/9D6h2V41uvOmvlOZJ0+lQu8pXP62mxgYp/NSw7KFH1M+bLkmqfm+H1r6wxnX/dvb1az+oUc5VV+nn5c+oqbHBPK4hmX7J5ZiteXD44AG9+PNycx17fs6eM1cZVwzRMyuXxywb+76taY+1rsJPj2fd94DSPGmSJW/s27Wv58Za/sFgMCItcsjLluaA43et9dVez+zH7FRuxvEnUscN9jQYn025darKVz+t0KmTruWtcNts/ahFa19YI4XTbE3L7DlzNTQrWx5PmgYOHOzYluz5b8+nohtvMr9rlJlsdce6HXs+dHR06srsbO3ZXadx4/PMbbnlSTzs+7aXdSLtzppGe3u2sm7Lmkdu+5ZD3kZLs5Gm7VsrI+qcdV9yyGfrZ0cOH9LaF9ZE9Guy7bewqFh3fv2vzT7Fum+3Y7O3AyON9v3Y27Rbf+zEnlfx1kN7ezP2Mygjwzw2e7679ft2sfLKaG/bt23RlFunRk2nU7kZI9uMYzO2vX7dK2Y7tqZD4bQYfXoi+Rtv/bW2AadyjHVOsZejkff2uhJPOux5ZuSPJDMt1rogW3s+fPCAJEX0kQZ731tYVKyZs+7V+ldf6XIdU1Nd5Xj81v7n22Xz1PCnPyn/ukK1NAc0cODgLm34jrvujnn8hkTz3n4ukEP9sm7Xfk4ZlJFh5oe3/wB9u2xexLVO9Xs7tLXyTbOu2PtK+3btbQ4AAHw+xDWC6JmVy1X93g4dPnhAC+eXmTd/U26dqpVPLtPC+WXa/JvXVTp9hnw+n331LoZk+nXmzJ+1cH6ZXnruWV2ZPVwlpdMky81xMtuVpAlFxSpf/bQWzi/Th41HzHWTTYN/2DA9+fj3tPyJpTrV3q60tDSNGZerJx//npYtWaRQKKSZ99wrWW5St7y1WQvnl2nlk8s0esxYFRYVO+7fzr7+siWL5B82TGlp5y4yjQuwUHu7Fs4vM4955j2zI4bsG3mw8sllyvT7tXjpD1WxcYOZJ5NvKbHsNVK0shk5eoz27ak3jy0UCumWktviWte4+H715efNvDl54kTEja2xXW96urleLEMy/VqybLlWrCrX+Lx8rV/3irlsbG5eRFl4vV5dP/HmiPWj8Vmm+RjbOP7RR/avOYqWF1Z76+vk8Xg0cvQY87PcvALtrqs1g0OxyjuWzEy/KjZucJyqZM//hfPLtG9PvRS+0Rqfl2+2j5VPLtP4vHzHdLgZkunXB7+v0sL5ZfrP//NTvfTcswqeaNPKJ5f12M2Dvd0k0u4Ki4o1esxYM42vvfpZ/bEqsUyxXTi/TNu2/FaKsW973sZbt2uqq7R65ZMKnmjTS88922XaRzAY1JHDh5SbV2B+NnL0GHk8Hm2tfFMlpdM09fY79dJzz0bsd7ZlOkki3NquJOUMH2Hm3cYN6zU+L9+xL1po64/t7HmVaD20trddNdW6/1sPKjevQAvD7S9z6FBzqk88/X4i0tLSzICusT2nNmovt6zsHGX6/cr0+83vZlwxRPV1uyLWq6zY5Npu4s3fWPV3d12tme+762r1jbl/E7GdeM8p9nK01vmeSIfCgZX6ul1mnSsoLDLLdvacuRo+YqSZzvq6XREBYit73zsuN1/9+nk1LjdfkpTpH6r29pAO7d8XV5tKS0tTev/+Wji/TMufWOrahqMdv5N4876+bpeGjxhp5ltWdo4GDR6s7VsrI7YXDAa1/ImlOnzwgKrf2+F4XlA4Pca1zkvPPauCwiKVPfSIylc/3eX8H63cAQDA50tcASInxk2s8fTo/Z3vqqOjM+JG101Lc0Bv/Po1KXwz9GHjEfn9w6RubleStry12Vx3+9ZKeTweefsPsH9NinNfVTvfibg56+zsVMXGDQoGgwoGg6ra+Y686eny+Xwam5unUCik93e+K4Xfn7B/317zgjMW+/rBYFAVGzeos7NTstwEWoMg7+98V6FQSGNzPxuZYeRBU2ODmgMBfdh4xLwArK/bZR6vk2hlU1NdpV+tWytZbnaMp6Gx1p18S4l21/7BPI6mxgZt3LA+4sbW2G7Vzncibn6jsb6DaP26tSqbv8C82K6s2GQGBIy8MI4nEcY6TY0N+uD939kXO4qWF1ZGHTEu8o2L+j31tXGXdyzW8rczAmbGsUoyy3j4iJER7ampsUG762rjLhuF88Goz73F3m4SbXdpHo85LfGD939nptfO6/Wafcm7295WMBiMuu/u1u1otla+qUGDB5t1fVxuvtqOH1dTY4Ny8wq0q6baLHNjv9abx0S4tV1J+q9n/93ML6cpnvH2x92th9b2tqe+VqdPh8wb40P79ynU3q5BGRlSnP1+Ijo7O7V+3dqI7bm10T31tUrz9JXP51Omf6iaAwG1tZ1Qpn+osrJz5E1P1976OvtqruLNX8Wov9Z831r5ZpegdbznlGTrfCLpOHzwgNmvv7/zXZ04cVyDMjLk8/k0fMTIiHN2ZcUmcxSRXVNjg9qOn1tXkvoPGKDaD2rMc9q43Hwda2pUMBiMq011dnZ2CcY4cTt+N/HmvdEHGW1pbG6eOjo6dGj/vojtxct6rXNo/z6dOHHcbDf283+y5Q4AAC4+SQWIfD6fvOnpKrrxJq1YVa4Vq8q1ZNlyXZmdHfVCJ5be2q6T3tiX3z8sYkTLilXlKrrxpoggSjSx3vM0KCNDHR0dCp367JeXgsGgQu3tjsEHhYfWt37UYv84afMWLIpIWzyMvA4EjtoXnbuwHzhICxYvMbc7fcbMqEEsNzXVVdpd+wczMODz+bTo+4+b23V7kuwmGAyq/KdPafiIkVqxqrzXXnS8p75WgwYPVlZ2jsbm5qnt+HHVVFclVd6Jcqtzbu/ACQSOJlU28SosKtaPnlptllk8ed6ddldTXaX1r76iWfc9oBWryl2feFdWbNKWtzZrweIl+tFTq83ATLR992TdtjNubsfl5svn82loVra2b610bWttra1Rb7rduG3PYC2vBYuXqF+/c9OUEtWT9bA5cEyh9nY1B47ZF/VKv29ntFEn
zYFjSk/3auToMRqXm6/6ul061tSocbn554I17e1moKQnRau/9nwPnTqpjo4O1/yIdk7pzTofi1tdiab1oxbl5hUoKztHHk9fvbvtbXnT0zVmbK6GZmVrT32taxtItk11R7S8N4I2RlAmN6+gywOuZBl12p4HhgtZ7gAAoGclFSAyLhY2blhvDks3/tmncCSit7brpDf2FQgcNaeyWP/FO5XG6aZnUEaGOcUs2gWp24VbTzLecWCkq/q9HfavOIoW1GhrbdWHjY3mKCDjn31aTbyMoIDP8u4KY5tuT5KjCYaH4y+cXyZZ3unQkw7t36eOjg6Nzc1Tbl6BOcXkfJS3U51TjJtE+w1lT6qprtJjjz6UUNvpbrsz9rnyyWWacuvUqEGihfPL9OrLz2vmrHtVWFQcdd89XbftjCkluQXn3nN1aP++qG2tvT0UEWyMR7TtFYbfz7N65ZNaGJ76dfq0c2AklvNVD3uj37czAgpOjMDedRNuUMYVQ7S3vk576ms1NCtbw4ePdL357wlu9dcp3zs6OhMKtBh6u87HkpaWFlFXopWFLMH5G2+erNaPWrRvb71C7e3KLSiQx5Om5sCxqG0gmTbVm4z0XHf9DfImOBqtOy50uQMAgJ6TVIBI4ZuT3vhJ8d7arpOe3tfe+jpl+v2uN5ixtLW2auDAweYQcZ/Pp+KJk8zlRiDBeOeRwu+WyPT7e/1C0P4U1Rcezh+v+rpdEe9ayMrO0fQZM3Vo/z55PGm646677avIFx4BFO+7UwqLipU5dKj21Nd2eZpsvO8jEVnZOfrmg39r/t1bN2/B8HD8KV8tVZqnrzlVIJ7ybv2oJWKaw8x77k1oFMfe+jp5vd6I/P/6PbMlSUcOH4poH1nZOZpy61QzgBUIHI14f8odd91tvqD0fOpOuyspnWauFzp1UqFQyP4VKZwnRt1tDhxTZ0eHFGPf0ep2TzCmRk2782v64PdV5o2Yva35wu/SMqbLNAeOKS0tzZwCVVhUrILCoohtW9m3Z7Rd+wi3sbl5CdU9q+7Uw0Ql0u/Hk1dpaWkR73Uz0uA2tbK+bpdGXXONOjrOqKmxwZwCNPyqEdpTX2v/eo+IVn/t+X7HXXfL40lLampSb9f5aIwpWMUTJ5n94fUTb47aJx3av0/t7SFdWzDBzPv6ul26tmBCxGguexuwt6mLRU11ldqOH9ddM+/RkcOHemU0mpNo5Z7oeRwAAFxYSQeIKis2aXddbcSQ4kXff7zLk8hE9dZ2FX63Qqbfb04l6el9NTU2aP26tZp6+53m9qzD+e37t6uprtKrLz9vrr946Q+1b0+9+Q6iYHjKkzc93dy+8XLU3r4QNIIY02fMNI+to+PcccWjsmKTNv/mdd3/rQe1Ijwd5eMzHysYDOrFNT/T+Lx8M01u+ePEOr3n/m89qC2/rVBNdZWawu+CMfZX9tAjrjf/bkKnTmrosCzzmIaPGKkXf15u/1qP2FtfJ509G3HDEU95G+8OMfLgw8YjCY3iaGpsUPnqpyPy/8rs4QoGg1r7wpqI9rFg8RJteWuzOdKismKTmgMBc3nfvpeppTlg30UE4wZmweIlPTYaqzvtrq211VxvybLlOnL4kONIkmDwRETd3b5ti1nP3Pbd3bodSzAY1LGmRuns2YgAsb2tGekyft2oqbFBW97abLbl0ukztHe3e4DZvj2j7RoBEKPuFU+clFDds+pOPUxUIv1+PHnV2dmp9lOnzG2Nz8vXi2t+5ho4MNr6/j/ulcLl2NlxRmc6zrgGZbrbbqLVX3u+Dx8xMuJX8xIRq853Nx2xGP2zUSdz8wqijhw12pA179taW3V5P29EANLeBuxtyon1HORWv3pDfd0uXd7PGzPYuH1rpQoKiyL6ymTFKncAAPD5EdfP3APoXVm2n1QG4hHr5+nRu2i3FzefZapxsgHFz5uS0mnmz9UnE+QDAACpLekRRAB6zsx77jVfTg3Eo7CoWOPzr43rl5OAVHTHXXfL6/X2+hTsi4Ux/bKnXk4NAABSDyOIgAuopHSaps+YqZbmAE98ERdjVMSQTL82blifMiMjLkaMILq4zJ4zN+LXNU+H2s/LFOyLwbwFizRi1NWqfm9H1KlvAAAA0RAgAgAAAAAASHFMMQMAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFEeACAAAAAAAIMURIAIAAAAAAEhxBIgAAAAAAABSHAEiAAAAAACAFNdn1NiCs/YP3WRl56jsoUfUz5tuflb93g6tfWGN+ffsOXNVdONN5t+nQ+0qX/20mhobJEmFRcWadd8DSvOkmd/ZuGG9Kis2mX/PW7BII0Zdbf7d0hxQ+U+fUjAYlOLYB8eZ2HECAAAAAIDUllCA6EtfulSZfr/6XPLZwKPQqZMRgQafzydv/wHm32c//VTNgYA++eQvkiRP377KuGKI+vTpY37nxPHjOn06ZP49OOMKXXb55ebff+nsVEtzs86e/VSKYx8cZ2LHCQAAAAAAUltCASIAAAAAAAB88fAOIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhw
BIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcQSIAAAAAAAAUhwBIgAAAAAAgBRHgAgAAAAAACDFESACAAAAAABIcX1GjS04a/8wEV+ecKluv8mjYZlfUntHX73wqxP63e6/2L8GAAAAAACAi1S3AkR/e/dl+odvXC71TdeXrhgjXdpXn546pu//qFZrKzrsXwcAAAAAAMBFKOkA0eTr0rTmB+mSpEsyx6nP5YPOLTj7qf58cIfuWXxK9Yc+iVzJgc/nU9nDj2pIpt++SBs3rFdlxSb7x58rs+fMVdGNN+nwwQN6ZuVy+2IAAAAAAIALLul3ED3wP/p+9scnltFCnaeVdmkffWO6ZXmSps+YqUXff1w+n8++CAAAAAAAAD0k6QDRFYP6nPvPp59Kf9wrtQWkEy36pGW/JMk/OPFNb9ywXgvnl2nh/DIdPnhAkjQk06/rJ95s/yoAAAAAAAB6SNJTzP7PYq9um+hRn+AZnb38S5LnUunjTvX59KzOej36RcUZff/fT9tX68I6xcw6pcz6uXV6VknpNE2fMdNcv/q9HVr7wpou60jS6VC7ylc/rabGhoipXq0ftajoxpu6rG/fdktzQOU/fUrBYNBc3+B2rIqy32dWLte8BYs0YtTV6uzo1KsvPy9JmnXfA+rsPGOuAwAAAAAAcD4lPswn7BdvhqeVXSLp03P/7fOJpD7nRha9aixPUjAY1JHDhyRJ3vR0+Xw+zZ4zNyKAI0lFN96kktJpysrO0T/809KIdxn186brG3P/JmKK2ohRV0cEeoz1C4uKNfX2O83PreYtWBSxjiRNvf1OFRYVx71fhQNQ1uBQTXVVxHIAAAAAAIALIekA0dvVnVr1i491tn9fXRI8I509qz6hDp3tl6Z/+dlp/WF/z/7UfXr/ARo+YqRkmYq2ccN6SVJuXoHG5uapnzddLc0BLVuySMuWLFJLc0ADBw7WyNFjzO0Yy63T2HLzCszlsmx/+RNL5e0/QJl+vzo7OvXSc89q4fwyVb+3Q2meNI3LzY97v2meviqeOEmStPk3r5vBoZrqKj326EP6wT8+yughAAAAAABwQSQdIJKkVb/4szb+d4fO9rtUfdo7dLa/R8+//me99MYZ+1cT5vP5zIBQqL1dZyV5vV4p/PLqFavKI0YT+f3DpPA7i5YsW64ly5Y7/jJaqL1dwWBQktT6UYv5eU11lT5sPCJZtj9vwSJl+ocqLa2v0jxpuv9bD2rFqvKI0UTx7vfK7GwNyfTrdKhde+vr7IsBAAAAAAAumKTfQWS47povad0/X65LTnXo04GX6auPtKvhWHjOWRzc3kFkf1dPc+CYyh56RP286RHfMxjv+rG+Nyjacknmfq3vIZLtXUQ11b9T3rXXSZLjtDD7dt32a4xWGjHqan7yHgAAAAAAXFS6HSDqn+7VTxaN1PAvfaQ//nmoHllerzNn4n//kP0Fz3bW4I39RdGyvBBakhlAsjLWd1pXUpeXRad50rosm3xLiUaMutqy1mcvsPb2HxDXfg8fPKDtWyvNfRhBrsKiYl5SDQAAAAAALqhuTTGTpDFjRuvPvm9qr3eBzl5xn0aPOjctrLuMd/5YR/asfWGNqt/bEfE9Q1Njg8pXP63ToXb7ogiHDx4wR/PI9j4gO2PZMyuXR6xjFe9+FZ7GtqumWgpPYyspnWb/CgAAAAAAwHnX7RFEY68Zrb8rmyuPx6OzZ89qxcpndOjwn+xfu+DsPzcPAAAAAACAc7o9gqjp6DFt/u0W1dbW6ZVf/PKiDA4BAAAAAADAXbcDRKfa2/X6G5v1zH88p23v7LQvBgAAAAAAwEWu21PMAAAAAAAA8PnW7RFEAAAAAAAA+HwjQAQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIr70qAr/P9s/zBeJaXTNOu+b6r2g/d15swZ++Kk9dZ2L3Y+n09/v+h7+vo9s1U6fYZKp89QyW3T1Nraoim33a7pX7v7vOVJYVGxHv7uY/pUn+rwgf32xZ8rWdk5+u5jP1CfSy45b2mZt2CRrr5mjGp31Wj2nLndLjujbngu65t0Gno7HxJJZ0+kp6d1p9+Zt2CRbrx5sqp2bLcvOu+6k44vqsKiYj34dw9r/x/3qv8AX6+2g0T0dpu0SqR9JquwqFjfeeS7OtF2XMeONtkXRxVPnzB7zlx95aulCbezrOwc/f3Cx3TyZDDh40o1JaXTNO+Rhfqrr5Ro357duqRPH/O6JOOKIfrKV0tj9nXns173tp5Mi1v9tfbZGUMye6yudqc9XmjzFizSrPu/qTHjcnX1NWPMfOtOmmbPmasH/uY7EdfW0bZhtAXjetzeJqL1Vck6H/10KouVv4meK4x7pXF5+V3atVt7d7q/Muq7Udfsde7UyZMR9de+7POuJ/tZ9IxuBYhumvRlBU+cUPV7O+yLuqW3tpss681+b7rssst0w8RJevutzfqP1U9r8xuv67cVG3XsaJNqd9Vo+9ZKxw6tN/z17G/o1MmghmT6u3Rusbh1iueL9Wbw1MmTOnXypLa8WXFeO50bb56sj0+fVu2umh4pO6NuNH3YkHQ6ejsfEklnT6Snp428erSuzM5R9c5340qDVdWO7Resvtt1Jx1uErloutDt38nQrCs1Zlyufl9dpabGhl5tB9FcyL4pkfaZrGNHm7Tlzd+YdcSe3mji6RPyr5ugfv28Cdet/gN8mnDDjdq3Z3fM+tubEsmPCyErO0dfu3uWfrn2Ja19YY1OnTypmbPu06WXpumHS/9Jtbtq4urrzme97m09mRa3+mvtsz19+/ZYXbW3x8+LktJpGjHqaq3818e1bctbEfkWb5rs5yyfz6ecq0Zo1f/6sTa/8bqG+P26vniia6Bg3oJFyru2QKtXLtdrv3hJm994XX0uuURXDMnU0Q8bY/ZVyTof/XQqs+ev/Xol0XPF0KwrNerqa9Tf5+sStHRr7073V1U7tmvzG6/rvXe2aXx+gXkfuOXNCp06eVLzFizSiJGjutTHb367TH/55C89Xg/Pt57sZz9P7P3UxSTpKWY+n0/DR4xUIHDUvqhbemu7yfL5fPKmp180x3M+ZGXnyJuerv9++y0NGjxYhUXF9q8AAIAvkEz/UElSc+CY+VnGFUPU+lGL5VtA7/L7hynU3q5gMGhflLRgMKhfrVtr/r2nvlYej0fe/gMivqdw0MCbnq7//a+Pq6mxwfy8smKTKis2RXwXOKuzajzyJ5VOnyGfz2dfHCGZ+6uS0mnK9PtVvvrpLvVx44b1Kp44KeZ+gUT1GTW24Kz9QzclpdM0fcZM8+/Ojk69+vLzqqmuUlZ2jsoeekT9vOmSpOr3dmjtC2ukcJCl7OFHNSTTb657+OABPbNyuRRlu82BY/p22Txt37ZFU26dqn7edLU0B1T+06fME4d9v7Lse/acucq4Yojq63aZ27cel928BYs0YtTVUvj4tm+t1Kz7HlCaJ02SzH1Lck1PSek0FU+cpMDRo8q/rlAbN6xXZcUmzZ4zV0U33iRJOh1q79LQZcmnqp3vdDkJGd
u17n/fnnpNKCpWP296RFlYt2UcozW/YykpnabcvAI9s3K55i1YpNaPWiLyzP5ZVnaOvl02T6//6peafEuJmYeSzPTby8maB8axWtNz+OABvfjzcjMN9vTZ64xRrvbPrdux5qu1PKzry5LXRhka24mWf9bttTQH1NHRqWNNjeYxGWUXDAYd64KkLnXdmmZ73bCXr71OFRYVR9Td6vd26I1fvxaxjXjSac3PluaAAkePKr1/f8e8sKcznjKvrfm98gsnmOkw6osc0mBdlqx4yr225vcqKZ0mObR5ax2ylonfP0wZVwwx8yVa2q3txVqfY9URe38hW59lHOv1E29OqFzty43ja/jTn5R/XaFamgMaOHBwl37Q6eLdejyK0v6NfDfyMHD0qMaOz9OJE8f1/9a9onvuf0C1u2p0401/pTRPmqrf26GtlW+a24iWJ3LI/9oPapRz1VX6efkzCp06GVGW9u9azx9Ds7Ll8aRp4MDBevXl5zX5lhLXvu/Q/n1R67RTnjv1TU7fs5eNNW9i5YXBWse8/QfE3I5bvtj7f1nOCVsr3zTzY1BGhmM67Omz1wW3/FO4/UZrZ27nd6c2F09fEK0NWdc/fPCAJHXJFyt7uo3t2fty636c6uC43HxlXDFEkjRi1NVmud1Scpt5PG75oCjXBvbja/jTn3TZ5ZeZ3zPOR8b+jWO099NO5xrZ8supP4xWF532YQSu7PVQkuP5yVpXOjs69d6O/1Z+QaF+Xv6MmhobuqTfXi8TOW+6sddfg1PbNOqqcVwbN6zX3vq6iL7Z2K+9Llv7Xeu2jP0rXHfkUFfcysmJPc/c+nunc7/b9avTOcR6frWnSbbzzulQu97+7WbdNu1O13OWcQxHDh/q0k7s5e3E/h17m7LXb6c+yu1z+7WAfdv2tFiPJ1bfae9LjDrh1A8ax7flrc0Rafzzx3/WZZdf5ngtZM/PwqJi3fn1vzbbmNs2q3a+o/d3vhs1D53qfTL7kWTm7ze+XdalrhltLFp/ZGXs+9UX/0tfnzU7Ig+c2ntJjPsre92Swz2XlVN7sLLXHyPvCouKNXPWvVr/6isR9/C762rN87jb/Ygc2r69v3Rr33LpY+zXZfbvWfehOM/RVvb2Zm1H9mX2PtHg1IaM6z77edJaFm7psJ/XnNr2hRT3CKLZc+Zqyq1TtfLJZVo4v0wvPfesmpsDOrR/X0RjNJaNHjNWWdk5ysrO0T/801IdOXxIC+eXaeH8Mh0+eED1dbtibtdQPHGSylc/rWVLzp3877jrbincMB9asNjc77Ili/RhY6P21NdK4SdfaZ6+Gn3NWC2cX6aNG9ZrfF6+srJzzG0bZoefGCxbskgL55fpmZXLVVNdpc2/eV0tzQEtW7JIy59YKm//AVHT4/cP05BMv86c+bMWzi9TZTg4NHzESHPbzYGAbim5zX4ICZtQVKzy1U9r4fwyfdh4xIxeWztrI1+86ekqCd/0xpKbV2Cmp75ul4aPGClfnNHpZ1YuV/V7O3T44AEz/daOx8iz3XW1+sbcv4nYrpGelU8uU6bfr8VLf6iKjRvM9E2+pcT8bs7wEWadsZZrZcUmvfTcswqeaNPKJ5c5dhaz58zV+Lx8c/2VTy7T+Lz8iPyxluFLzz2rK7OHu+afvXyrdr6jK7Oz7V+TwnV29Jix5r5fe/UVc1laWpqm3DrVLNPNv3ldM++Z7VhfR44eo3176s3yDYVCZp0yOv5XX37eTN/JEyfsm5BipLOkdFpE26zYuEFjx+fZN+Eo3jL/qylfNcv4peee1ZSvlqqwqFg+n0+l02do829eN9Nw/KOPIvaRqHjLfeToa8xjDrW36xvfLlMwGNSRw4eUm1dgfvf6iTdLkt7f+a75mRJIu5todcSqpHRaRJ+1bctvzWWxynXq7XfqpeeeNeuPNz1ds+fMNddPS0tTev/+Wji/TMufWKrVK59U8ESbXnruWS1/YqnrCSxa+zf66ZVPLtPoMWMjnpz5hw3Tk49/T8ufWKpT7e1KS0vTmHG5evLx7+ml555VQWGRyh56xDwPWOu7nX1/y5Yskn/YMKWlnTsJ2828516zrJYtWaQ/HT5oLsvM9Kti4wY99uhDjhdfTtzqdDx9U7xlY+SNkRcz77k3YjvxiLYdex5ay8x+TigsKtagwYO1tfLNiO27pdet7za45Z9dtGOMJd6+wK0N2fv8+rpdETcaTpzyI55ztVMdHDHqatXX7dLC8PXEgsVLJMk1Tw3R9mc/vp/+rx9p+RNLdfjgAVW/t8OxDcR7rrHn1+662og6G60uuu3DXg+zsnM0aPBgbd9aaW7XYK8rTz7+PY0ZlxvRJ8Sql1bR6kZPsvYHxk2TtW826lDfvpeZeVv93g4VRxlRYK07GzesV0FhkdlmYpWTlf364KXnntWfP/447vOf2/Wr0zkkGiMoaOxr5/b/1p7dda7nLGsbcLoJ9PYfII/Ho7bWVvsiV9Gux9zOL26fW9nb68L5Zdq3p97+NVOsvtPel0TrB5saG7R+3VpN/vIUZWXnmNc7z//smS7XQiNHj5HH4+lyDji0f586Ojo0NvfcdePY3Dz186ab6xqjt/bW10XNQznUe6tE9mPlVtei9Udu2kOnVLFxg8bnXxv1HJTo/ZUvxiyW0KmT6ujo0KCMDPuiLvXH2t/XVFdpy28rzHZ3S8ltag4EzDYR634kVn/p1r7jvb6NVjcNifTDY3PzIq4VvF6vWafjaYsGaxsygkOh9nazfdrzKVo6aqqrXPupi0FcAaKs7ByNHjNW69etNSOo43Lz1dlxRsFg0Mxco3GNy81XR0eHQqdOdql0xkm8rbU15nbH5uapX790VWzcoKbGBgWDQYXa283jmnxLiXbVVJv7HTl6jNLTvWoOHDMblceTpldffE6S1Nbaqra2Ewqdin/uv32oa7T0KByUOnzwgLncaAwvrvmZGanM9PtdG7skTZ8xUytWlWvFqnIt+v7jrp3Hlrc2m/m2fWulOVzW3lkHg0FV7XwnokN3Y1zsGx3p3vo6eTwejRw9xv7VuI3NzVMoFNIbv37N/Gxr5Ztdtmukp6mxQc2BgD5sPGJekNbX7ZI3Pd3Mi/969t/NtO+tr1NnZ6c5PD4aX3gKozXvmhobtLuuNiJ/WpoD5vHWVFfpw8Yj8vuHmcsNxvaqdr5j1pHKik06HH6i7CTN4zGP9YP3f2ceR2dnZ0RbeH/nuwqFQuZJz6qmusocLm0EL4wng5NvKdHu2j+YedfU2KCNG9ZHrG+Ils7cvALtrqs1j6emukq7aqoj1ncTb5lv/s3r5nHWVFep+dgxjcvNN5cbx9LU2KAP3v+d+XmiEin3F39ebv69fWulBg0erKzsHO2przX/r3D+HDl8qEuHHm/ao3GrI3Zer9e8+Hl329vmscQq11011Wa+G/2D9UKls7PT8UYrGUZ+GIG0psYG7d+3N6Kcre1H4f1XbNygYDCoQ/v36cSJ42ZdtNd3O/v+gsGgKjZuUGdnp/2rJmNbwWBQ72572/zc2gfFK1adjibesjHyxlhu7RvjFW079jy0lpn9n
DAuN19tx4+71lG7WH13vPkX7RijSaQvcGpDyfT5buI5VzvVwcMHD5jXPfV1u3Q61G5uwylPDfHsLxHxnGuc8mtr5ZvypqebfWm0uui2D6PcjYv8sbl56ujoiHi4aLDXFac+IVa9tHKrGz1pxKirNeWrpV2eRtv75mAwqOef/Tczb6NNnZKt7ry/812dOHFcgzIy4iong8/nU/HESRFtqKa6Su9uezvu85/b9WsijOvV9es+u8ncuGF91L7ojrvuVqi93TE4lKyaKNdjinJ+cfvcYNRta17+at1as3zsYvWd1r4knn6wprpKbceP63987W5N/vIUs41urXwzYnqU2znAyAujbfj9w1T7QY3SPH0/O8+0t6upsSFmHtrrvVUi+4lHtP4omprqKu2u/UPEw2yr3ri/iiZWf2/0h1+/577wvfhn7SjW/Uis/jJa+451fRtP3VSC/XClZXpoU/ge0/rdWG3RYG1DRv5a882aT/Gm42IVV4DI6cRrDO/1hQMxRTfeZAY1Mq4YouVPLJUkDR8x0oyWyjbHPdp2FW7kThdHsgRmjNFCkjQoI8MMTBlPAYxGLlvwyW7tC2sUam/XkmXnhlobMsJT1GSptG7p8fl8SvP0jejEBmVkyDdwkBYsXqIVq8r10ILFWr9ubdSnIhs3rDejkclEFO37XLGqXNNnzIyrgxuXmx+x7oLFS+QbOMi1w4uHPcimGFFvhYfqR3vvQWFRsX701GrzGPv1+2yKYTRuT4cCgaNx5Y+d2/bc1FRXaf2rr2jWfQ9oxapy12i3wh2VNSBqN2/BIrN8jeGLRnuMFoCMR3e3k0yZK1zuCqe9/KdPafiIkVqxqtx8SmhNcyL/SkrvcCynWOXeHDimjo4OyXKxNDY3T1nheeT2J2bqRtoN8daRyopN2vLWZi1YvEQ/emp11KdWBrdybWttTfgC3VoW0fbvD4+qXLJsufn9ohtvcg3w2BntwH7MbpzyPxrjaeSKGAH5ZEXrx6x6smy6K1qZNVkCMT6fT0Ozsl0v3J0k2ne75V+0Y4zGrc+O1RcY3Na3mj1nrq3/cW7D3TlXGxJ58NUT+zO41Vc7b/8B8nq9EQ++FixeooEDB7sGYAzp/Qe47sO4KTQutHPzCroEmg3x9AmJ1sueZj2+tLQ0Tb6lRFt+W+F4/WtXUjrNzNv7v/Wg62jJaBIpp2htwCmvEzn/JcJ6zZ8Itz5F3ThWp+sxRTm/uH1u5ZSXiYiWTrcytPeD69e9ouyc4dq/b69ZF5saG9R2/Hhc54BA4KiGjxiprOwcZVwxxLz5Hjl6jHIto2kUJQ/jkch+etPWyjeV6fc79vnJ3F8Z1z9ugQ+3clQc/X0wHCgfOz5P27dt6RKosbLfjyTbX8ZzfeuWJnvdTITP59Oi7z9u5oN1xG88bdGJU/9jLa/eSMf5FFeAyN5JFRYVK3PoUO2przUzwBgSv9Ay/M8pcybfUmJmaLTtyuHliNbl9pcp+sJPNIztZfqHqr095Bp8cvLMyuXauGG9ptw6VVnh6XHW0UGx0mMdwWTw+4eZwxcXzi9zHKbd09paW/VhY6M5TDjeYJMvHACzBqgWhofuWUdOJMqtMXR0dHZpOPEoDM/5Xb3ySXPI3unT7oEUq2gn/2ROxE7bMy6c3dRUV+mxRx/SyieXacqtUx07SMXYjjEn2Sij6vAv/sU6mSTKvp1YN1+GZMvcuv1gMKjlTyzVwvllUjjNz6xcHlE34/1XWfFGl3IyRCv3TP9QeTwe8+/6ul3KzSuI+kQq2bRbxVtHKis2aeH8Mr368vOaOete1yCNIVr9aG8PJXShbS2LaP1aIHA0og80/tmHifcUp/wflJER9abJSMuRw4dU9vCjXcquO+JtMz1ZNt0Vq8z21NdqaFa2cgvOzf13GrXhJJm+2y3/Yh2jG6c+21wWpS+wSktLi9rnr31hTcQxuT0QSvZcnaye3F+0+moVOnVSbW0nIq4RF8boMwztp05G3YcxqvO662+QNz29yxQSq2h9QjL1MhlOfZMczrPGaImpt9/p2vcbSsLv4jDK9KXnno06WtJNIuUUrQ25pTGR81+82pIInq99YU3U0UNBh+nksbhdjxnczi9unxvc8jJebn2nYpShtR+8peQ2tbWdMF8bYjCmR8U6BxijZG68ebI6Os5o3956HWtq1HUTblCap69ZJ2LlYSzx7qe3NTU2aMtbmzXl1qnqP+Czetmd+6toU9GcBloY4unvJ99Sog8bj0SdlirbOa67/WWs69t462a8fJb3Ohl5YB/xG6stOonW/wQCR3s8HedbXAGiQOCoMv1+ZWXnyBd+N4jOnlVz4JgZSXaKgNozpyT805XGtIxo27WPEMrKztHMe2abQ42NIIwRKLrjrrs1JNNvRonto4WMyh1weBJl1dbaap5c7UGoWOlxiibuqa9V5tChMW/cetKh/fvk8aSZ72qy8oWjqNZ3WhiM4XL2iyz7/N7Wj1oiOquZ99wbNXq8t75OXq834njuuOtueTxpjp1aLPZ8HhueihgP4+RvBAEVrltTbp2a1BMG4yLZ2rleP/Fm84VldiWl08zOMHTqpEKhkLnMeGpoMPLL/o4be102TjyG+rpdEe8VyMrOiXiZXDyMfLLOKy4sKtaV2cPtX3UUb5lb860k/EsNWyvfVFZ2jr754N+a34sW2I1HvOU+JNNvDus2+qT9+/ZGDKX1pqereOIk1/oSK+1GP2IM/TaOwxCtjlh9/Z7ZZhk3B46pMzzSKRZ7/TDSeaypsVdOWHvr61yfqPWGttZWDRw4OKIciydOsn9NCi974MHvmHUw1vkhnr7PrU7Ho7tlM2/BIse+PVGxysxowxMn3RL3sSnOvjve/It1jG7i7QvcGMPT4+3zo4l2ru4NPb0/e311OtcEg0Eda2qM6xd+nETbhzGq866Z9+jI4UOOAXuFr8Oi9Qnx1Mt4zVuwyBzxaud0bigsKtb4/Gu7jMA4fPCAXn35+ZhBIvuD1nG5+VGD4W4SKSenNlRYVKybv/wVxzQ6nft7gnFtan0/zPQZM81jcjJ7ztyYowSMUSD271nPzQZflOsxt/NLev8Bjp/bOeXl1++Z7Xrs8fadcilDez9YUjpNw0eM1Jp//6n279sbkc/v73xXHR2dmnbn1/TB76tczwHGPeK1BRO0/497pXB7HH7VCClchtHyMF7x7Od8qazYpOZAQGPG5ZqfxXt/5aSyYpNC7e1dAhcl4XeUWWfKWMXq741rhWdWLleovT3ie9HuR7rTX8ZzfRtP3UyEfXBHVvhVL4rSRuPh1P8Y7W5vfV2Pp+N8iytAZMypW7B4iRYv/aH27amPeEKxft0ryvT7zaFbRqcaDM93NIas5uYVRETtom030z9U/fql6/5vPWgOYdvy1mYz8m8MczeW9+17mVqaA+a27aOF3BqnHIbnGkPtjMJfsHiJZs+ZGzM9fv8wM1hkqAnPSTWO021IXU8KBoN6cc3PND4v39xnPPudfEtJ1HnExsnHmPNpDO3/sPFIRPTYOLka+2xqbFD56qcjjmf4iJFJv63dCJgY
+y+eOCli/8ZF44LFSxwv1Na+sEa762ojhnluCf/aQTKMztU4Hnu9sGprbdXU2+/UilXlWrJsuY4cPmTut7OzU+2nTpl5ND4v33x3lZW9Hi5e+kN1dHzWHisrNmnzb16PaDsfn/k4YhvxWPvCGvMFqCtWlat0+gzt3d21/TiJt8wDR49q8dIfasWqck29/U5zznPo1EkNHZYVsa713UDJiKfcW5oDum5CsVk+Idu7CpoaGxQKD7O1B+4MsdIeDA/rLSgs0opV5Sp76BH93vKENlodsQoGT0SU8fZtW7o86XVirx/GPqI9VbX2t/aLZjun9r9+3VozTStiTEnrrprqKvPGakW4fdjPWYZgMKj0/v3Ntjvl1qmObc4Qq+9TlDqtOPqmZMrGkN5/gNI8fSOmXScrVpkFwzeTgwYNdL0BkUN6Y/XdipF/VrGOMZp4+oJojL4onj7fyp4fyZ6rk9XT+7PXV7dzjXUKv7FPp/rvJNY+6ut26fJ+3qj1vib8gyPWc6a1T4inXvYEp3PDrPseMH9JyM563G75Ze1vV6wq19CsbMe+Lh6JlJO9Dc267wF9fPq0Yxqdzv09IRieiu5NTzf3lV84QaFTJxM6Z9k1NTboB/947uWz1rwonjipy3k/2vWY2/nlw8YGx8/t+eOUl1dmD+/yPUO8fafBXobWfrCwqFhTb7/TnLb5xq9fkzc93awPxjlAZ8863ldZ1dft0qVpl5rfaw4c06Vpl5oPF6LlYSJi7ceJ/Xqlp6xf94pOhyLfmRvP/ZWbZ1aeuxaw1scpt07V6pVPOvYditHfl5ROU0FhkRmYXr/uFY3PyzeDRtHuR7rTX8Z7fRutbibK2hesCF9zG4EptzbqVl+snPqfKeEXexvlHCsd3emneltCP3N/PpXYfuoR+CLLivFTlRcLYxiw0w3rbIef9ARShc/h52nPp8KiYhXfNEk/e+Yn9kXoZRe67FNZsteKya4HXGwuVP/DNd8X0+flfgS9K64RRBeC02gcABdOSek0XZk93PFJrTEk+PMwbBL4IqqpriI4dIHccdfd8nq9MZ+ko2cZw/XdXk7txliPa0wgOW5TIwF8MVyUASLjZjORuYAAepZ16uWK8JBl+0/u+sLvtFoSno51Pp9eAcCFYP+VsvF5+RHDytH75i1YpAWLl2h3XW3M805Wdo7+5cdPmeVlrOc0EhaAO+Oa7/5vPRj3r+wB+Py5aKeYAQAAAAAA4Py4KEcQAQAAAAAA4PwhQAQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIr70qAr/P9s//Dzyufz6e8XfU+l0+/Uvj27derkSXNZYVGxHvy7h7X/j3sjPj+fZs+Zq+lfu1u1H7yvM2fO2BcrKztHf7/wMZ08GdSxo032xUhQYVGxvvPId3Wi7bhjfhr1xXNZXx0+sN++uMeUlE7TrPu+6Vru+OLKys7Rdx/7gfpcckmv1rHzIVZ7uhBi9am0vd4xb8EizbznXrW2tkTUBc5hAAAAn29fuBFEHR2dUp8+uqXkNvuiC27tC2u0/ImlCgaDUvjmZt6CRfavoYfUVFfpB4v/QTXVVVL4BnfJEz9WVnaO/asXrXkLFmn2nLn2j7+wPu9twl7Hmhob9IN/fFSVFZvsX73o2cvC3p4uBvY+9UK1F3tepYKPT4c0+ZYS+8c9wt6OAAAAcH584QJEHk+aDv7xjxqff60Ki4rtiwEAQDcdO9akTL9fJaXT7IsAAADwOdVn1NiCs/YP3cxbsEgjRl0tSTodalf56qcVOnVSZQ8/qiGZ/ojPmxobNHvOXGVcMUTPrFwuhaf0lD38qKp2vqPKik3Kys5R2UOPqJ83XZJU/d4OrX1hjRSeGjB9xkxJUmdHp159+fmYT66t2/f7h2n4iJEq/+lTCgaDKiwq1p1f/2v9vPwZNTU2mN81jtu6j5LSaSqeOCli3Zmz7tX6V19RTXWVsrJz9O2yeXr9V790PKbZc+aq6MabJFt+WLf7jW+XmXkpSRs3rNfe+jp9u2yeanfV6Mab/kppnrSI9RUewu+UZ0Z6AkePauz4PJ04cdw8fqtY6+/bU68JRcXq503vku/2PNu+tVL5BYVmPti33dIciDgGa74cPnhAktT6UUtCZW6vQwrXS0lmPTPq3fp1r5jlNCgjw9y2wvt/8eflMdNsZ20DRvpGjh4TUbdkOYZnVi43y7225vfmzZQ9b+zbfWnNz3T/3L/pUj8P7d/n2t6M/QSOHlX+dYVSOJ1Gvij8ZH7WfQ8ozZMmOZS/sV37enbWspSt7c6eM1dDs7Ll8aRp4MDBjvnp1EZm3nNvlzbh1E9Y02wcd23N75VfOME8fmNdu3kLFkXUudlz5kb0E9Y2ev3EmyPqjDWN9nzcuGG9JLnWMaO+GvVCkplW63YVRzuxsm/PyJtbSm4zt2Fs394HKpwOa99msNZH2fqn13/1S7MeWtuONb1DMv2Obcmp3I1jMSTS/w7KyFDxxEmu7cVYHq1N2Fn7IVnqkr3OG5875ZW97tnbpvX4jH3Zj8utP3Tqq2PlfbT27VSX7cdvZ7SjQOCoptw61SxHp3OjWzqc2t7U2+9U3R9qIh7u2PMFAAAAvSfudxAZN+E/XPpP2vzG6+p3uVfHjjUpe/hV+uSTT7Tqf/1Y772zTQXX36CBgwardleN8q+boH79vKrasV2SdNlll+mGiZPU9GGDDh/Yr2/+f3+rI386rJU/fkLvvbNNnr591XjkTyopnaYpt07V6pXL9dovXtKn+lS3lNwW8z0S1u1ve2uzbpg4SZn+YardVaOhWVdqzLhc/b66Spf06aOyhx9VqL3dTM+n+lR33Pk17duzW60tLbph4s1qaW7WsaNNmnLb7bpqxCh1dnSodleNrr5mrLKyr1JlxcYux2Nc9K7818e1Yf3/1dBhV+r64omq2rFdI68erSuzc1S9811t2/KWMq4Yoo4zZ/TDpf+kwwf2q/8An268ebIG+AbqJ8uXaetbm1Vw/Q26+pqxqtqx3bxR3vLWZv3H6qdV94cP9NXb79DJk0GdON6qGyZOUnr//vrJ8mV6q2JTl2OLZ/3hV41Q+eqn9dovXtK4vHyNz7tWtR+8r8suu0xlDz+qI4cPaeWPn9DmN17XV2+froyMIar7wwc6drRJN9w0SYcPHTS3ffNf3aIznR06fGB/l3xJH9BfxTdN0tEPG1W7qybuMj9z5oxyrhqhq0aMMvPkK7feJm96uvneqa98tVT1dbvU2tKiCTfcqH17dmvHf29VS3NAV40YqX/7yVN6q+INs764pdmefyWl0zRi1NVmGj49+6laAsc0aHCGWbeM91tZ6/7Iq0er8Pob1KdPH7O+FRYVq2DCDarasd1xux82/ElvVWzSmHG5OvDHffrJih/p2NEmjc8vcG1vxn4ajpxrUy3NAd08+Sv6VJ/q8IH95o32L9e+qBf+s1x1f/hAgwYNVnPgaETZvvfONt385Smu72aaPWeuxuflm2VV94cPNO3Ou8z37ORfN0Gjrr5Gv37tVb303LNd3kVSWFSsmyZ
/Wf/2k6f02i9eUtvx4zodCumtije6tAmjzu6uqzXr3dBhV+rLJV9V7QfvS5JumDhJY8bl6devvaoX/rNcLc0B3THj647vykkf0F+5+QVmnf7q7dM1cOBgNX3YqGNHm3TTpC8reOKEqt/boVtKbtPLz/+nXvvFS/rLJ39R8cSbtW/Pbl3Sp4/+5ze+pW1v/9as65f0uSRqHTP6vPzrJig3v0Dv/Pfb+o/VT+svn/xFk/5qivkul1jtxM6+vbxrC3XHjK/r6IeNWvnjJyKOu/lYkybcMFFnOs6Y5Trlttt19tOz2vDauojtVu3Y3qUs+g/wme3J3l/s2L5VX7n1Nt1ya6lZDuPy8jX8qpFm/29Pm7VvtOo4cybu/ndYdo6uzM7Ru9u2OLaXWG3Czt4PtTQH1KdPH50KnlB+wQT9x+r/rQ3r/68yrhhi1iOnvtzOfhxD/H7d/j/uUlqaRz9c+k9qaQ7ohok3m3XWfhzW/lDhOh9v3hvBIaf23fZRi2NdDsR4f9CNN0/Wx6dP6/X1/1d51xZqfN61qtqxPaKOxEpH9Xs7VFhUrCtzrlLDnw7prr/+n9r29m/161++2qUdAQAA4PyIa4pZYVGxBg0erPXrXjE/27hhvZoaG1RTXaVfrVsrSQoGgzpy+JD5RDsexneDwaDe3fa2JCk3r0C762rNJ8vv73xXHR2dGjl6TMS60QSDQVVs3KDRY8Z2eY/ByNFj5PF4ItLz/s53FQqFNDY3T02NDWo7flyDMjIkSf0HDFDtBzXmsY7LzdexpsYuo3N8Pp+Gjxipqp3vmMu2Vr4pb3p6l2Nw09nZqYqNGxQMBhUMBlW18x1509Pl8/k0NjdPoVBI7+98Vwq/32T/vr0al5tvrm/dt1086295a7OZ79u3Vsrj8cjbf4CZZ1sr3zS/u37dKzp9ut38u7Jik/nkuamxQc2BgPz+YY75UlmxyRwdoQTLfE99rdI8feXz+ZTpH6rmQEBtbSeU6R+qrOwcedPTtbe+zr6aK7c0O/F6veayd7e97ZrXdi3NAb3483Lz7+1bKzVo8GCzXsS73VjtraU5oDd+/Zr53Q8bj8jvHyZJmnxLiXbX/sF8st/U2KCNG9Z3KVuj3uXmFZjbNRhlac2zpsYG7a6rjfj+h41HuowaskrzeJTpHypJ+uD933UZRWIw6qyRJoXblMfjiagbm3/zurm/muoqNR87FlGvDUa98PYfIG//AQq1t6u5OaBBGRny+XwampWtPfW1kqT/evbfzePaW1+nzs5O85glmfna1NigD97/nfl5LIcPHjDbyfs739WJE+f6mnjaiRPr9urrdul0qN0sS+txG+VaPHGSfD6fub/6ul22LcbPqAdGe7eWe33dLrPvckqbW9+YbP/rJlqbsPL5fCqeOCmibtdUV5nt8fln/83c55762qj9hBPrceypr9Xp0yFt31opSTq0f59C7e1mmuPpD+PN+3jad7J1WeHzwKDBgx2ndMdKx/atlRo9ZqxmfeNbCrW3xxy5BAAAgN4VV4BoUEaGOjo6FDrl/Otf8xYs0opV5VqxqjxiCH4sxrDxFavKtej7j5s3Et70dBXdeJO5zSXLluvK7Gzz4jleNdVVajt+XDPvuTfic6f0BINBhdrbzQvl1o9alJtXoKzsHHk8ffXutrflTU/XmLG5ETeRVt7+A+T1ejV9xkzz2BcsXqKBAwdH3Fgmy+8fpiGZfi1Ztjwiv+MNyHVnfac8s/P5fFr0/cfNbRvTLrz9B8jj8aittdW+ihReL5Eybw4cU3q6VyNHj9G43HzV1+3SsaZGjcvNPxdQaG93DTh0R2XFJm15a7MWLF6iHz212vGGKF7NgWPq6OiQkthuMu3NyONA4Kh90bngxMBBWrB4ibnd6TNmmjeYVm5lGQgcdfy+k5rqKq1/9RXNuu8BrVhVbk67c+L3D1OovT0iGBA6dVIdHR2OdcPQ+lGL/SMpvK7CgaexuXlq/ahFH/y+Srl5BeZN66H9+6RwYPxHT60223G/fuemuAWDQZX/9CkNHzFSK1aV99jLid3yNhFtra1qazvh2k731teZwbWRo8eoo6PTDBh3V+tHLa75nmjfmEz/212x8r+kdJp57Pd/60GlpZ2bkpWM5sCxc8HJwDH7ooT7Q8XI+2jtW1K363JT+EFD6fQZSvf2Nz+PJx011VXav2+vsnOGRzywAQAAwIURV4CorbXV9Wmp8S6ChfPLtHB+marf22H/SlTPrFyuhfPLdOTwIZU9/KgkKdTero0b1pvbNP4l83Rx/bpXlOn367oJN5ifRUuPcQO9p75WgwYP1o03T1brRy3at7deofZ25RYUyONJc7ywD506qba2E3rpuWcjjvuxRx+KOpoiXoHAUR0+eKBLvsT7foburm8d5SJJmf6hSvN4JNu7gYztWkc+pKWlRdzcGDcPsgTn4i1zY4TBdRNuUMYVQ7S3vk576ms1NCtbw4ePdL1R6gmVFZu0cH6ZXn35ec2cdW/MYI6bTP9QecJ5pwS2m2x7swdArdpaW/VhY6OWLVkUkffWX4cyRAvO2AM50dRUV+mxRx/SyieXacqtU12DRG6Bp46OTtcbeVlGJtoZo65y8wo0+pqx2lNfq731dfKmp2vEqKvV2XFGQcs7y1avfFIL55dp5ZPLIkbLBYNBLX9iqRbOL5MsU3C7K1o76QlG2xmXm5/wSJzuSLRvTKb/7a5oddt4h5DRRl567ll1dnbav9YjEu0PY4nVvnuiLhsjo75y21Tzs3jSkZWdo+EjRioUCl2UvzwKAACQauIKEB3av08dHR0RI3Gmz5ipMePGR4xKMKYRGAKBo8r0+80pBHfcdbf5kkyfz6cHHvyOeeNnHdlQX7dLU26d2mXqgcIX6v/y46cclzlpamzQlrc2K/+6QvOJr1N6SkqnKdPvN6egHNq/T+3tIV1bMMF8Wl1ft0vXFkxwHaESDAZ1rKlRpdNndLmh7Ql76+u69asx3VnfyBfrRfzkW0qUdum5IIf96XtWdo4y/efK2pgCYUxtkaTrJ95s1gXFKHMn9XW7NOqaa9TRcUZNjQ3mqI/hV43oldEFkvT1e2abgZvmwDF1hkcANQeOKS0tTWNz86TwyJOCwqKIdYdk+nX9xJulcN0vnT5D+/ftVVNjg+t27eyjgOztLZb6ul0qKCwy95WVnaPpM2bq0P598njSdMddd9tX6cIIsFjLKis7R1NunRr3VKWS0mlmHQydOqlQKGT/imlvfZ28Xm/Esd1x193yeNLMMpcUUbeMtmydDmm1p75WgwdnyJs+QIf27wsHTVo14YYbzTTYR8yNzc0zRxBlZefomw/+rbm9ngpIxtNOesL2rZW6Zsw4jR4z1jWPelqifWMy/W93OdXtwqJi3fzlr3QZyTYuN79bI4hiSbQ/jCZa+3ary77waNDZc+Zavu0uGJ7SPXLUNWY7UYx0+Hw+fWPu36hq5zt6cc3PNHrMWNfAeGFRsf7lyf/tuhwAAAA9I64AUTA8pcKbnm4OFc8vnKDA0SZV7XzHnDaweOkP1dHx2VPVyopNag4EzK
HtffteppbmgLnN9P79zelOU26dqhfX/EzBYFCVFZu0u642Yki8MQUtGfb3eDilx/pLLMZ3jjU16kzHGfNGtK21VZf380a9EV77whqF2tsjpnG5PZHdWvmmMv1+rYgxzcbQ1Nig9evWaurtd5rbjmdKkqE76xvrFhQWmet+2HjEHFVhTDO4/1sPasWqcpU99EjEjb/x/h0jX3LzCiLKJNEy31tfJ509q/1/3CuFy6uz40xEedkZUw4XLF7iWibRBIMnzPQtWLxE27dtUU11lRmENNpB6fQZ2rs78h1ILc0BXTehWCvC0yxC7e3mr1K5bVfhm/mCwiL96KnVGjl6TNT2FktlxSZt/s3rEfv6+MzHCgaDenHNzzQ+L9/M+xVR6uTaF9ZElNWCxUu05a3NcY9uaGttNevgkmXLdeTwIXNde5toamxQ+eqnI47N+stHhsDRo1q89IdasapcU2+/U+vXrXUNIhzav09nOs5EjJ45dfKkdPasGQg1pl0Z9bV44iSzrodOndTQYVkRx2PU7+7WsVjtpCcY6W87ftw1j+RQFt2VSN+YbP9rbS/x9Gt29ro9674H9PHp0xF5sWJVuYZmZUeMIOrpvEq0P4wmWvuOVpcTVVNdpV011RGfRUvHN759bsTS+zvfVVNjg7Zv26JZ9z2gwqLibrcjAAAAJCehn7kHrIxpONafzY6Xz+Hn6oFEfdHrUW+krze2CQAAAODzL64RRICdMU0q1igEN3fcdbe8Xm9CvzYGpJreaCfGVKOeejk1AAAAgC8GRhAhLsaoA+v7UA4fPBD3C65nz5kb8Ytbp0PtEVP6gGR80UbD9GY7KSwq1qz7HlBn55ke2yYAAACALw4CRAAAAAAAACmOKWYAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOIIEAEAAAAAAKQ4AkQAAAAAAAApjgARAAAAAABAiiNABAAAAAAAkOL6jBpbcNb+oZus7ByVPfSI+nnTzc+q39uhtS+sMf+ePWeuim68yfz7dKhd5aufVlNjgySpsKhYs+57QGmeNPM7GzesV2XFJvPveQsWacSoq82/W5oDKv/pUwoGg1Ic++A4EztOAAAAAACQ2hIKEH3pS5cq0+9Xn0s+G3gUOnUyItDg8/nk7T/A/Pvsp5+qORDQJ5/8RZLk6dtXGVcMUZ8+fczvnDh+XKdPh8y/B2dcocsuv9z8+y+dnWppbtbZs59KceyD40zsOAEAAAAAQGpLKEAEAAAAAACALx7eQQQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIojQAQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIojQAQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIojQAQAAAAAAJDiCBABAAAAAACkOAJEAAAAAAAAKY4AEQAAAAAAQIr7/wGHZSgOLf8vaQAAAABJRU5ErkJggg==" + } + }, + "cell_type": "markdown", + "id": "a8dcd130", + "metadata": {}, + "source": [ + "![image.png](attachment:image.png)" + ] + }, + { + "cell_type": "markdown", + "id": "1a086c45", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/05_weathermate_ai_agent.ipynb b/week2/community-contributions/05_weathermate_ai_agent.ipynb new file mode 100644 index 0000000..0f6502a --- /dev/null +++ b/week2/community-contributions/05_weathermate_ai_agent.ipynb @@ -0,0 +1,557 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "ae1ef804-3504-488d-af86-5a0da36fea78", + "metadata": {}, + "source": [ + "# ☀️🏃‍♀️ WeatherMate\n", + "----\n", + "\n", + "**WeatherMate** is a conversational **AI agent** that analyzes real-time weather conditions and suggests the best activities and events based on location. Whether it's sunny, rainy, or snowy, WeatherMate helps you make the most of your day! \n", + "\n", + "Here's how it works:\n", + "1. Get current weather conditions for the user's location.\n", + "2. 
Recommend suitable indoor or outdoor activities based on the weather.\n", + "3. Find relevant events using the Ticketmaster API.\n", + "4. Merge both activity suggestions and events into a single, structured response.\n", + "\n", + "---\n", + "\n", + "Large Language Models (LLMs), by themselves, cannot fetch real-time data such as weather information. To enable LLMs to access and use such real-time data, we integrate **external tools.** \n", + "\n", + "In this notebook, we will implement a weather API, allowing the assistant to fetch real-time weather information and use it for personalized activity suggestions based on current weather conditions. This is an essential step in transforming an LLM into a more interactive and data-driven AI assistant.\n", + "\n", + "\n", + "In this notebook, we will develop a conversational AI Agent that helps users receive personalized activity recommendations based on real-time weather data.\n", + "\n", + "- 🧑‍💻 Skill Level: Advanced\n", + "- 📤 Output Format: conversational chat\n", + "- 🚀 Tools:\n", + " - Weather API integration \n", + " - Ticketmaster API\n", + " - OpenAI with external tool handling\n", + " - Gradio for the UI\n", + "\n", + "🛠️ Requirements\n", + "- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n", + "- 🔑 OpenAI API Key\n", + "- 🔑 Weather API integration (https://www.weatherapi.com)\n", + "- 🔑 Ticketmaster API (https://developer.ticketmaster.com/explore/)\n", + "\n", + "⚙️ Customizable by user\n", + "- 🤖 Selected model\n", + "- 📜 system_prompt: Controls model behavior\n", + "\n", + "---\n", + "📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)" + ] + }, + { + "cell_type": "markdown", + "id": "ad262788", + "metadata": {}, + "source": [ + "**Class Diagram**\n", + "\n", + "![](https://github.com/lisekarimi/lexo/blob/main/assets/05_weather_class_diagram.png?raw=true)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6b7a492-f510-4ba4-bbc3-239675d389dd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "from datetime import datetime\n", + "\n", + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if not openai_api_key:\n", + " print(\"❌ OpenAI API Key is missing!\")\n", + "\n", + "weather_api_key = os.getenv('WEATHERAPI_KEY')\n", + "if not weather_api_key:\n", + " print(\"❌ Weather API Key is missing!\")\n", + "\n", + "ticketmaster_api_key = os.getenv('TICKETMASTER_KEY')\n", + "if not ticketmaster_api_key:\n", + " print(\"❌ TicketMaster API Key is missing!\")\n", + "\n", + "\n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "347dbe00-5826-4aa6-9d2c-9d028fc33ec8", + "metadata": {}, + "outputs": [], + "source": [ + "# Get today's date and day name\n", + "today_str = datetime.today().strftime('%Y-%m-%d')\n", + "day_name = datetime.today().strftime('%A')\n", + "\n", + "nb_activity = 10\n", + "\n", + "\n", + "system_message = f\"\"\"\n", + "You are a fun and helpful assistant for an Activity Suggestion App.\n", + "Your job is to recommend **up to {nb_activity} activities** based on the real-time weather fetched from the API, ensuring a mix of **indoor, outdoor, and event-based activities** whenever possible.\n", + "\n", + "The total must always be 
**10 or fewer**, following this rule:\n", + "**nb_events + nb_indoors + nb_outdoors ≤ 10**.\n", + "\n", + "You must **analyze and think carefully** to determine the best combination of activities and events for the user.\n", + "- Evaluate **weather conditions** to decide if outdoor activities are suitable.\n", + "- Check **event availability** and select the most relevant ones.\n", + "- Balance **indoor, outdoor, and event-based activities** dynamically to provide the best experience.\n", + "\n", + "If one of these categories is unavailable, that's fine—just provide the best possible suggestions without exceeding **10 activities**.\n", + "Deliver everything **in one go—no waiting!**\n", + "\n", + "\n", + "### **Understanding Relative Dates**\n", + "- Always interpret relative dates based on **{today_str} ({day_name})**.\n", + "- The weekend always refers to Saturday and Sunday.\n", + "- \"Next {day_name}\" should refer to the **closest upcoming occurrence** of that day.\n", + "- If the user asks for a time range (e.g., \"the next 3 days\"), calculate the **exact date range** starting from today.\n", + "- If no specific date is mentioned, **assume today by default**.\n", + "- **Do not ask for confirmation** when interpreting dates—just assume the correct date and proceed confidently unless there's real ambiguity.\n", + "\n", + "### **Activity and Event Suggestion Process**\n", + "To provide the best {nb_activity} activity recommendations, follow these steps:\n", + "Step 1: Retrieve Weather Data – Use the Weather API to get current conditions for the user's location.\n", + "Step 2: Suggest Activities – Recommend suitable indoor or outdoor activities based on the weather.\n", + "Step 3: Fetch Events (if available) – Use the Ticketmaster API to find relevant events in the user’s area.\n", + "Step 4: Combine Everything – Merge both event listings and activity suggestions into a single, well-structured response.\n", + "This entire process should be done seamlessly in one go without making the user wait.\n", + "\n", + "### **How to Handle Each API**\n", + "- **Weather API Handling**:\n", + " - If the user requests a relative date (e.g., \"tomorrow,\" \"next Monday\"), calculate the number of days from today.\n", + " - Provide the weather forecast only for the requested date, ignoring any other days in the response.\n", + " - If no weather data is available, inform the user in a friendly, light-hearted way.\n", + " - The forecast is limited to 14 days, so if the user requests a longer period, politely let him know.\n", + "\n", + "- **Ticketmaster API Handling**:\n", + " - If the user asks for events today, set the start date as today’s date.\n", + " - If the user asks for any specific weekday, find the next occurrence of that day and use it as the start date.\n", + " - If the user asks for a range of days (e.g., \"the next 3 days\"), use today’s date as the start date.\n", + " - The country corresponding to the user's city must be represented using the ISO Alpha-2 Code (e.g., FR for France, US for the United States, CA for Canada, DK for Denmark).\n", + " - If more than 5 events are found, ask the user for their interests to refine the search, using a one-word keyword like 'music,' 'cinema,' or 'theater.'\n", + " - If no events are found, explicitly inform the user in a friendly, funny way.\n", + " - Do not mention Ticketmaster unless necessary; simply state that you are checking for events.\n", + "\n", + "### **User Interaction Rules**\n", + "- If the user **doesn’t mention a city**, **ask them 
to provide one**.\n", + "- If an event search fails, do **not** mention Ticketmaster; simply say that no events were found.\n", + "- Ensure all activity suggestions are provided **in one response**, combining weather-based activities and event suggestions.\n", + "\n", + "\n", + "### **Event Formatting in Output**\n", + "**If Ticketmaster events are available**, format the output as follows:\n", + "Here are some events that may interest you:\n", + "**Event Name**:\n", + "- 📅 Date: Give the date like 19th March 2025\n", + "- 📍 Venue:\n", + "- 🔗 Ticket Link: Put the URL here\n", + "\n", + "(And don't forget to separate these gems with a snazzy divider)\n", + "\n", + "**Event Name**:\n", + "- 📅 Date: Give the date like 19th March 2025\n", + "- 📍 Venue:\n", + "- 🔗 Ticket Link: Put the URL here\n", + "\n", + "(Another divider, because we like to keep things fresh!)\n", + "\n", + "**Event Name**:\n", + "- 📅 Date: Give the date like 19th March 2025\n", + "- 📍 Venue:\n", + "- 🔗 Ticket Link: Put the URL here\n", + "\n", + "### **Tone and Style**\n", + "**Keep it short, fun, and don’t forget to add a dash of humor!**\n", + "Your job is to keep the user smiling while giving them the **best activities for the day**.\n", + "Be **accurate and concise**, but let’s keep it **light and lively!** 🎉\n", + "\"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "578da33d-be38-4c75-8a96-9d6bfc1af99b", + "metadata": {}, + "outputs": [], + "source": [ + "class WeatherAPI:\n", + " def get_weather(self, city: str, days: int) -> dict:\n", + " \"\"\"Fetches weather data for the given city for the next 'days' number of days.\"\"\"\n", + " url = \"https://api.weatherapi.com/v1/forecast.json\"\n", + " params = {\"key\": weather_api_key, \"q\": city, \"days\": days}\n", + " # print(f\"params weather: {params}\")\n", + " response = requests.get(url, params=params)\n", + "\n", + " if response.status_code == 200:\n", + " data = response.json()\n", + " forecast = []\n", + " for day in data[\"forecast\"][\"forecastday\"]:\n", + " forecast.append({\n", + " \"date\": day[\"date\"],\n", + " \"temp\": day[\"day\"][\"avgtemp_c\"]\n", + " })\n", + "\n", + " result = {\n", + " \"city\": city,\n", + " \"forecast\": forecast\n", + " }\n", + " return result\n", + " else:\n", + " return {\"error\": f\"City '{city}' not found or other issue. 
Please check the city name and try again.\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "305f9f18-8556-4b49-9f6b-4a2233eefae9", + "metadata": {}, + "outputs": [], + "source": [ + "from abc import ABC, abstractmethod\n", + "\n", + "class BaseEventAPI(ABC):\n", + " @abstractmethod\n", + " def get_events(self, city, country_code, keywords, size):\n", + " \"\"\"Fetches upcoming events from an event provider.\"\"\"\n", + " pass # Subclasses must implement this method\n", + "\n", + "class TicketmasterAPI(BaseEventAPI):\n", + " def get_events(self, city, country_code, keywords, start_date):\n", + " \"\"\"Fetches upcoming events from Ticketmaster for a given city.\"\"\"\n", + " url = \"https://app.ticketmaster.com/discovery/v2/events.json\"\n", + " params = {\n", + " \"apikey\": ticketmaster_api_key,\n", + " \"city\": city,\n", + " \"countryCode\": country_code,\n", + " \"keyword\": \",\".join(keywords),\n", + " \"size\": 10,\n", + " \"startDateTime\": start_date\n", + " }\n", + "\n", + " response = requests.get(url, params=params)\n", + "\n", + " if response.status_code == 200:\n", + " data = response.json()\n", + " events = data.get(\"_embedded\", {}).get(\"events\", [])\n", + " return [\n", + " {\n", + " \"name\": event[\"name\"],\n", + " \"date\": event[\"dates\"][\"start\"][\"localDate\"],\n", + " \"venue\": event[\"_embedded\"][\"venues\"][0][\"name\"],\n", + " \"url\": event.get(\"url\", \"N/A\") # Using .get() to avoid KeyError\n", + " }\n", + " for event in events\n", + " ] if events else []\n", + " else:\n", + " return {\"error\": f\"API request failed! Status: {response.status_code}, Response: {response.text}\"}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c60820f-4e9f-4851-8330-52c8fd676259", + "metadata": {}, + "outputs": [], + "source": [ + "class ChatAssistant:\n", + " def __init__(self):\n", + " self.model = MODEL\n", + " self.tools = [\n", + " {\n", + " \"type\": \"function\",\n", + " \"function\": {\n", + " \"name\": \"get_weather\",\n", + " \"description\": \"Get the current weather and forecast for the destination city.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city for which the weather is being requested.\"\n", + " },\n", + " \"days\": {\n", + " \"type\": \"integer\",\n", + " \"description\": \"The number of days for the weather forecast (can be 1, 2, 6, or 10).\"\n", + " }\n", + " },\n", + " \"required\": [\"city\", \"days\"],\n", + " \"additionalProperties\": False\n", + " }\n", + " }\n", + " },\n", + " {\n", + " \"type\": \"function\",\n", + " \"function\": {\n", + " \"name\": \"get_ticketmaster_events\",\n", + " \"description\": \"Fetch upcoming events from Ticketmaster.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"City where the events are searched.\"\n", + " },\n", + " \"country_code\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"Country code for filtering results.\"\n", + " },\n", + " \"keywords\": {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"string\"\n", + " },\n", + " \"description\": \"Optional keywords for event search (e.g., 'music', 'concert').\"\n", + " },\n", + " \"size\": {\n", + " \"type\": \"integer\",\n", + " \"description\": \"Number of events to fetch.\"\n", + " },\n", + " \"start_date\": {\n", + " \"type\": 
\"string\",\n", + " \"description\": \"Start date for the event search.\"\n", + " }\n", + " },\n", + " \"required\": [\"city\", \"country_code\", \"size\", \"start_date\"],\n", + " \"additionalProperties\": False\n", + " }\n", + " }\n", + " }\n", + " ]\n", + "\n", + " def chat(self, user_message, history, weather_api, event_apis):\n", + " # Build the conversation\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": user_message}]\n", + "\n", + " # OpenAI response\n", + " response = openai.chat.completions.create(model=self.model, messages=messages, tools=self.tools, stream=True)\n", + "\n", + " recovered_pieces = {\n", + " \"content\": None,\n", + " \"role\": \"assistant\",\n", + " \"tool_calls\": {}\n", + " }\n", + " last_tool_calls = {}\n", + " has_tool_call = False\n", + " result = \"\" # Initialize result accumulator\n", + " # previous_index = None # Track the last processed index\n", + "\n", + " for chunk in response:\n", + " delta = chunk.choices[0].delta\n", + " finish_reason = chunk.choices[0].finish_reason\n", + "\n", + " # Handle tool call detection\n", + " if delta.tool_calls and finish_reason in [None, \"tool_calls\"]:\n", + " has_tool_call = True\n", + " piece = delta.tool_calls[0] # Get the first piece in the tool call\n", + "\n", + " # Create a dictionary for the tool call if it doesn't exist yet\n", + " recovered_pieces[\"tool_calls\"][piece.index] = recovered_pieces[\"tool_calls\"].get(\n", + " piece.index, {\"id\": None, \"function\": {\"arguments\": \"\", \"name\": \"\"}, \"type\": \"function\"}\n", + " )\n", + "\n", + " if piece.id:\n", + " recovered_pieces[\"tool_calls\"][piece.index][\"id\"] = piece.id\n", + " if piece.function.name:\n", + " recovered_pieces[\"tool_calls\"][piece.index][\"function\"][\"name\"] = piece.function.name\n", + " recovered_pieces[\"tool_calls\"][piece.index][\"function\"][\"arguments\"] += piece.function.arguments\n", + "\n", + " # Store the tool call in the dictionary by index\n", + " last_tool_calls[piece.index] = recovered_pieces[\"tool_calls\"][piece.index]\n", + "\n", + " # Store content in result and yield\n", + " else:\n", + " result += delta.content or \"\"\n", + " if result.strip():\n", + " yield result\n", + "\n", + "\n", + " # Handle tool call scenario\n", + " if has_tool_call:\n", + " # Handle the tool calls\n", + " response = self.handle_tool_call(last_tool_calls, weather_api, event_apis)\n", + "\n", + " if response: # Only iterate if response is not None\n", + " tool_calls_list = [tool_call for tool_call in last_tool_calls.values()]\n", + " messages.append({\"role\": \"assistant\", \"tool_calls\": tool_calls_list}) # Append the tool calls to the messages\n", + "\n", + " # Dynamically process each tool call response and append it to the message history\n", + " for res in response:\n", + " messages.append({\n", + " \"role\": \"tool\",\n", + " \"tool_call_id\": res[\"tool_call_id\"],\n", + " \"content\": json.dumps(res[\"content\"])\n", + " })\n", + "\n", + " # New OpenAI request with tool response\n", + " response = openai.chat.completions.create(model=self.model, messages=messages, stream=True)\n", + "\n", + " result = \"\" # Reset result before second stream\n", + " for chunk in response:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " if result.strip():\n", + " yield result\n", + "\n", + "\n", + " def handle_tool_call(self, tool_call, weather_api, event_apis):\n", + " stored_values = {} # Dictionary to store the valid value for each field\n", + 
"\n", + " for index, call in tool_call.items():\n", + " # Load the arguments for each tool call dynamically\n", + " arguments = json.loads(call[\"function\"][\"arguments\"])\n", + "\n", + " # Iterate over all keys dynamically\n", + " for key, value in arguments.items():\n", + " # Update the field if it's currently None or hasn't been set before\n", + " if key not in stored_values or stored_values[key] is None:\n", + " stored_values[key] = value\n", + "\n", + " city = stored_values.get('city')\n", + " days = stored_values.get('days')\n", + " country_code = stored_values.get('country_code')\n", + " keywords = stored_values.get('keywords', [])\n", + " # size = stored_values.get('size')\n", + " start_date = stored_values.get('start_date')\n", + " start_date = str(start_date) + \"T00:00:00Z\"\n", + "\n", + " weather_data = None\n", + " event_data = None\n", + "\n", + " # Iteration over tool_call\n", + " for call in tool_call.values():\n", + " if call[\"function\"][\"name\"] == \"get_weather\":\n", + " weather_data = weather_api.get_weather(city, days)\n", + "\n", + " if call[\"function\"][\"name\"] == \"get_ticketmaster_events\":\n", + " event_data = event_apis[\"ticketmaster\"].get_events(city, country_code, keywords, start_date)\n", + "\n", + " responses = []\n", + "\n", + " # Ensure weather response is always included\n", + " weather_tool_call_id = next((call[\"id\"] for call in tool_call.values() if call[\"function\"][\"name\"] == \"get_weather\"), None)\n", + " if weather_data and \"forecast\" in weather_data:\n", + " responses.append({\n", + " \"role\": \"assistant\",\n", + " \"content\": {\"weather\": weather_data[\"forecast\"]},\n", + " \"tool_call_id\": weather_tool_call_id\n", + " })\n", + " elif weather_tool_call_id:\n", + " responses.append({\n", + " \"role\": \"assistant\",\n", + " \"content\": {\"message\": \"No weather data available for this location.\"},\n", + " \"tool_call_id\": weather_tool_call_id\n", + " })\n", + "\n", + " # Ensure event response is always included\n", + " event_tool_call_id = next((call[\"id\"] for call in tool_call.values() if call[\"function\"][\"name\"] == \"get_ticketmaster_events\"), None)\n", + " if event_data:\n", + " responses.append({\n", + " \"role\": \"assistant\",\n", + " \"content\": {\"events\": event_data},\n", + " \"tool_call_id\": event_tool_call_id\n", + " })\n", + " elif event_tool_call_id:\n", + " responses.append({\n", + " \"role\": \"assistant\",\n", + " \"content\": {\"message\": \"No events found for this location.\"},\n", + " \"tool_call_id\": event_tool_call_id\n", + " })\n", + "\n", + " # print(\"Final responses:\", responses)\n", + " return responses\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "191a3a9e-95e1-4ca6-8992-4a5bafb9b8ff", + "metadata": {}, + "outputs": [], + "source": [ + "# GradioInterface class to handle the Gradio UI\n", + "class GradioInterface:\n", + " def __init__(self, activity_assistant):\n", + " self.activity_assistant = activity_assistant\n", + "\n", + " def launch(self):\n", + " # Gradio chat interface\n", + " gr.ChatInterface(fn=self.activity_assistant.chat, type=\"messages\").launch()\n", + "\n", + "# ActivityAssistant setup\n", + "class ActivityAssistant:\n", + " def __init__(self):\n", + " self.weather_api = WeatherAPI() # Interact with the Weather API\n", + " self.event_apis = { # Interact with the Events API\n", + " \"ticketmaster\": TicketmasterAPI()\n", + " }\n", + " self.chat_assistant = ChatAssistant() # This will handle conversation with OpenAI\n", + "\n", + " def 
chat(self, user_message, history):\n", + " # Forward the user message and conversation history to ChatAssistant\n", + " response_stream = self.chat_assistant.chat(user_message, history, self.weather_api, self.event_apis)\n", + " for chunk in response_stream:\n", + " yield chunk" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0b501e8e-2e10-4ab7-b523-1d4b8ad358e8", + "metadata": {}, + "outputs": [], + "source": [ + "# Main execution\n", + "if __name__ == \"__main__\":\n", + " activity_assistant = ActivityAssistant()\n", + " gradio_interface = GradioInterface(activity_assistant)\n", + " gradio_interface.launch()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 7edfba7dca0156cee124ab1409a184647fdcb468 Mon Sep 17 00:00:00 2001 From: Simon <134164156+simondb94@users.noreply.github.com> Date: Fri, 6 Jun 2025 03:27:08 +0100 Subject: [PATCH 23/23] Add files via upload Improvements made. --- .../simondb94-Improved-LLM-Tutor-.ipynb | 1449 +++++++++++++++++ 1 file changed, 1449 insertions(+) create mode 100644 week1/community-contributions/simondb94-Improved-LLM-Tutor-.ipynb diff --git a/week1/community-contributions/simondb94-Improved-LLM-Tutor-.ipynb b/week1/community-contributions/simondb94-Improved-LLM-Tutor-.ipynb new file mode 100644 index 0000000..dab89a9 --- /dev/null +++ b/week1/community-contributions/simondb94-Improved-LLM-Tutor-.ipynb @@ -0,0 +1,1449 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "Improved-LLM-Tutor" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# Standard library imports\n", + "import os\n", + "import time\n", + "import json\n", + "from typing import Dict, List, Any, Optional, Union, Callable\n", + "\n", + "# Third-party imports\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, HTML, update_display\n", + "from openai import OpenAI\n", + "import ollama\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Try to import rich, install if not available\n", + "try:\n", + " from rich.console import Console\n", + " from rich.markdown import Markdown as RichMarkdown\n", + " from rich.panel import Panel\n", + "except ImportError:\n", + " !pip install rich\n", + " from rich.console import Console\n", + " from rich.markdown import Markdown as RichMarkdown\n", + " from rich.panel import Panel\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Constants\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "DEFAULT_SYSTEM_PROMPT = \"You are a helpful technical tutor who answers questions about python code, software engineering, data science and LLMs\"\n", + "\n", + "# Set up environment\n", + "load_dotenv()\n", + "openai = OpenAI()\n", + "console = Console()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": 
{}, + "outputs": [], + "source": [ + "\n", + "class LLMTutor:\n", + " \"\"\"\n", + " A class that provides tutoring functionality using multiple LLM models.\n", + " \"\"\"\n", + " \n", + " def __init__(self, \n", + " system_prompt: str = DEFAULT_SYSTEM_PROMPT,\n", + " gpt_model: str = MODEL_GPT,\n", + " llama_model: str = MODEL_LLAMA):\n", + " \"\"\"\n", + " Initialize the LLM Tutor with specified models and system prompt.\n", + " \n", + " Args:\n", + " system_prompt: The system prompt to use for the LLMs\n", + " gpt_model: The OpenAI GPT model to use\n", + " llama_model: The Ollama model to use\n", + " \"\"\"\n", + " self.system_prompt = system_prompt\n", + " self.gpt_model = gpt_model\n", + " self.llama_model = llama_model\n", + " self.history: List[Dict[str, Any]] = []\n", + " self.response_times = {'gpt': [], 'llama': []}\n", + " \n", + " def format_question(self, question: str) -> str:\n", + " \"\"\"\n", + " Format the user's question with a standard prefix.\n", + " \n", + " Args:\n", + " question: The user's question\n", + " \n", + " Returns:\n", + " Formatted question with prefix\n", + " \"\"\"\n", + " return f\"Please give a detailed explanation to the following question: {question}\"\n", + " \n", + " def create_messages(self, question: str) -> List[Dict[str, str]]:\n", + " \"\"\"\n", + " Create the message structure for LLM API calls.\n", + " \n", + " Args:\n", + " question: The user's question\n", + " \n", + " Returns:\n", + " List of message dictionaries\n", + " \"\"\"\n", + " formatted_question = self.format_question(question)\n", + " return [\n", + " {\"role\": \"system\", \"content\": self.system_prompt},\n", + " {\"role\": \"user\", \"content\": formatted_question}\n", + " ]\n", + " \n", + " def get_gpt_response(self, \n", + " question: str, \n", + " stream: bool = True) -> str:\n", + " \"\"\"\n", + " Get a response from the GPT model.\n", + " \n", + " Args:\n", + " question: The user's question\n", + " stream: Whether to stream the response\n", + " \n", + " Returns:\n", + " The model's response as a string\n", + " \"\"\"\n", + " messages = self.create_messages(question)\n", + " start_time = time.time()\n", + " \n", + " try:\n", + " if stream:\n", + " return self._stream_gpt_response(messages)\n", + " else:\n", + " response = openai.chat.completions.create(\n", + " model=self.gpt_model, \n", + " messages=messages\n", + " )\n", + " elapsed = time.time() - start_time\n", + " self.response_times['gpt'].append(elapsed)\n", + " return response.choices[0].message.content\n", + " except Exception as e:\n", + " console.print(f\"[bold red]Error with GPT model:[/bold red] {str(e)}\")\n", + " return f\"Error: {str(e)}\"\n", + " \n", + " def _stream_gpt_response(self, messages: List[Dict[str, str]]) -> str:\n", + " \"\"\"\n", + " Stream a response from the GPT model.\n", + " \n", + " Args:\n", + " messages: The messages to send to the model\n", + " \n", + " Returns:\n", + " The complete response as a string\n", + " \"\"\"\n", + " start_time = time.time()\n", + " try:\n", + " stream = openai.chat.completions.create(\n", + " model=self.gpt_model, \n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " \n", + " for chunk in stream:\n", + " delta_content = chunk.choices[0].delta.content or ''\n", + " response += delta_content\n", + " # Clean the response for display\n", + " clean_response = response.replace(\"```python\", \"```\").replace(\"```\", \"\")\n", + " 
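# update_display() below re-renders the accumulated Markdown in place, reusing the\n", + "                    # display handle created above instead of emitting a new output for every streamed chunk.\n", + "                    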
update_display(Markdown(clean_response), display_id=display_handle.display_id)\n", + " \n", + " elapsed = time.time() - start_time\n", + " self.response_times['gpt'].append(elapsed)\n", + " return response\n", + " except Exception as e:\n", + " console.print(f\"[bold red]Error streaming GPT response:[/bold red] {str(e)}\")\n", + " return f\"Error: {str(e)}\"\n", + " \n", + " def get_llama_response(self, question: str) -> str:\n", + " \"\"\"\n", + " Get a response from the Llama model.\n", + " \n", + " Args:\n", + " question: The user's question\n", + " \n", + " Returns:\n", + " The model's response as a string\n", + " \"\"\"\n", + " messages = self.create_messages(question)\n", + " start_time = time.time()\n", + " \n", + " try:\n", + " response = ollama.chat(model=self.llama_model, messages=messages)\n", + " elapsed = time.time() - start_time\n", + " self.response_times['llama'].append(elapsed)\n", + " return response['message']['content']\n", + " except Exception as e:\n", + " console.print(f\"[bold red]Error with Llama model:[/bold red] {str(e)}\")\n", + " return f\"Error: {str(e)}\"\n", + " \n", + " def ask(self, question: str, models: List[str] = ['gpt', 'llama']) -> Dict[str, str]:\n", + " \"\"\"\n", + " Ask a question to one or more models.\n", + " \n", + " Args:\n", + " question: The user's question\n", + " models: List of models to query ('gpt', 'llama', or both)\n", + " \n", + " Returns:\n", + " Dictionary with model responses\n", + " \"\"\"\n", + " responses = {}\n", + " \n", + " # Store the question in history\n", + " self.history.append({\n", + " 'question': question,\n", + " 'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),\n", + " 'responses': {}\n", + " })\n", + " \n", + " # Get responses from requested models\n", + " if 'gpt' in models:\n", + " console.print(Panel(f\"[bold blue]Getting response from {self.gpt_model}...[/bold blue]\"))\n", + " gpt_response = self.get_gpt_response(question)\n", + " responses['gpt'] = gpt_response\n", + " self.history[-1]['responses']['gpt'] = gpt_response\n", + " \n", + " if 'llama' in models:\n", + " console.print(Panel(f\"[bold green]Getting response from {self.llama_model}...[/bold green]\"))\n", + " llama_response = self.get_llama_response(question)\n", + " responses['llama'] = llama_response\n", + " self.history[-1]['responses']['llama'] = llama_response\n", + " display(Markdown(f\"## {self.llama_model} Response\\n{llama_response}\"))\n", + " \n", + " return responses\n", + " \n", + " def compare_responses(self, question: str = None) -> None:\n", + " \"\"\"\n", + " Compare responses from different models side by side.\n", + " \n", + " Args:\n", + " question: Optional specific question to compare responses for\n", + " \"\"\"\n", + " if question:\n", + " responses = self.ask(question)\n", + " else:\n", + " # Use the most recent question from history\n", + " if not self.history:\n", + " console.print(\"[bold red]No questions in history to compare[/bold red]\")\n", + " return\n", + " responses = self.history[-1]['responses']\n", + " question = self.history[-1]['question']\n", + " \n", + " # Create HTML for side-by-side comparison\n", + " html = f\"\"\"\n", + "
\n", + "
\n", + "

{self.gpt_model}

\n", + "
{responses.get('gpt', 'No response')}
\n", + "
\n", + "
\n", + "

{self.llama_model}

\n", + "
{responses.get('llama', 'No response')}
\n", + "
\n", + "
\n", + " \"\"\"\n", + " display(HTML(html))\n", + " \n", + " def show_performance_metrics(self) -> None:\n", + " \"\"\"\n", + " Display performance metrics for the models.\n", + " \"\"\"\n", + " if not self.response_times['gpt'] and not self.response_times['llama']:\n", + " console.print(\"[bold yellow]No performance data available yet[/bold yellow]\")\n", + " return\n", + " \n", + " # Create DataFrame for metrics\n", + " data = {\n", + " 'Model': [],\n", + " 'Response Time (s)': []\n", + " }\n", + " \n", + " for model, times in self.response_times.items():\n", + " for t in times:\n", + " data['Model'].append(model)\n", + " data['Response Time (s)'].append(t)\n", + " \n", + " df = pd.DataFrame(data)\n", + " \n", + " # Calculate statistics\n", + " stats = df.groupby('Model')['Response Time (s)'].agg(['mean', 'min', 'max', 'count'])\n", + " \n", + " # Display statistics\n", + " console.print(\"\\n[bold]Performance Statistics:[/bold]\")\n", + " console.print(stats)\n", + " \n", + " # Create visualization\n", + " plt.figure(figsize=(10, 6))\n", + " \n", + " # Box plot\n", + " ax = plt.subplot(1, 2, 1)\n", + " df.boxplot(column='Response Time (s)', by='Model', ax=ax)\n", + " plt.title('Response Time Distribution')\n", + " plt.suptitle('')\n", + " \n", + " # Bar chart for average times\n", + " ax = plt.subplot(1, 2, 2)\n", + " stats['mean'].plot(kind='bar', ax=ax, color=['#4285F4', '#34A853'])\n", + " plt.title('Average Response Time')\n", + " plt.ylabel('Seconds')\n", + " \n", + " plt.tight_layout()\n", + " plt.show()\n", + " \n", + " def save_history(self, filename: str = 'tutor_history.json') -> None:\n", + " \"\"\"\n", + " Save the question and response history to a file.\n", + " \n", + " Args:\n", + " filename: The filename to save to\n", + " \"\"\"\n", + " try:\n", + " with open(filename, 'w') as f:\n", + " json.dump(self.history, f, indent=2)\n", + " console.print(f\"[bold green]History saved to {filename}[/bold green]\")\n", + " except Exception as e:\n", + " console.print(f\"[bold red]Error saving history:[/bold red] {str(e)}\")\n", + " \n", + " def load_history(self, filename: str = 'tutor_history.json') -> None:\n", + " \"\"\"\n", + " Load question and response history from a file.\n", + " \n", + " Args:\n", + " filename: The filename to load from\n", + " \"\"\"\n", + " try:\n", + " with open(filename, 'r') as f:\n", + " self.history = json.load(f)\n", + " console.print(f\"[bold green]History loaded from {filename}[/bold green]\")\n", + " except FileNotFoundError:\n", + " console.print(f\"[bold yellow]History file {filename} not found[/bold yellow]\")\n", + " except Exception as e:\n", + " console.print(f\"[bold red]Error loading history:[/bold red] {str(e)}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
LLM Tutor initialized successfully!\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;32mLLM Tutor initialized successfully!\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Create a tutor instance\n", + "tutor = LLMTutor()\n", + "console.print(\"[bold green]LLM Tutor initialized successfully![/bold green]\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "25a36470-a68f-40f6-bea1-d2ebb173c015", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       " Question:                                                                                                       \n",
+       "                                                                                                                 \n",
+       " Given a list of dictionaries called 'books', write code to find and print all information                       \n",
+       " about the book titled 'Mastery' by Robert Greene.                                                               \n",
+       "                                                                                                                 \n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[34m╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\u001b[0m\n", + "\u001b[34m│\u001b[0m \u001b[1mQuestion:\u001b[0m \u001b[34m│\u001b[0m\n", + "\u001b[34m│\u001b[0m \u001b[34m│\u001b[0m\n", + "\u001b[34m│\u001b[0m Given a list of dictionaries called 'books', write code to find and print all information \u001b[34m│\u001b[0m\n", + "\u001b[34m│\u001b[0m about the book titled 'Mastery' by Robert Greene. \u001b[34m│\u001b[0m\n", + "\u001b[34m│\u001b[0m \u001b[34m│\u001b[0m\n", + "\u001b[34m╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Define your question here\n", + "question = \"\"\"\n", + "Given a list of dictionaries called 'books', write code to find and print all information \n", + "about the book titled 'Mastery' by Robert Greene.\n", + "\"\"\"\n", + "\n", + "console.print(Panel(f\"[bold]Question:[/bold]\\n{question}\", border_style=\"blue\"))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "bceaeaf9-4d08-4380-b757-597b851dd8ca", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       "│ Getting response from gpt-4o-mini...                                                                            │\n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n", + "│ \u001b[1;34mGetting response from gpt-4o-mini...\u001b[0m │\n", + "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "To find and print all information about the book titled \"Mastery\" by Robert Greene from a list of dictionaries called `books`, we can write a function that iterates through the list, checks for the specific title and author, and then prints the information if a match is found. Here's a step-by-step explanation followed by the code.\n", + "\n", + "### Steps to Follow:\n", + "\n", + "1. **Structure of the Data**: \n", + " Each book in the `books` list is a dictionary. We need to understand how the book's information is structured. A typical dictionary might look like this:\n", + " \n", + " {\n", + " 'title': 'Mastery',\n", + " 'author': 'Robert Greene',\n", + " 'year': 2012,\n", + " 'genre': 'Non-fiction',\n", + " 'isbn': '978-0143124177'\n", + " }\n", + " \n", + "\n", + "2. **Iterate through the List**:\n", + " We will use a loop to go through each book in the `books` list. \n", + "\n", + "3. **Check for Conditions**:\n", + " For each book (dictionary), we need to check if the 'title' is 'Mastery' and the 'author' is 'Robert Greene'. \n", + "\n", + "4. **Print the Details**: \n", + " If we find a match, we will print all the details of that book.\n", + "\n", + "### Example Code\n", + "\n", + "Here’s a Python code snippet that accomplishes this:\n", + "\n", + "\n", + "# Sample list of dictionaries representing books\n", + "books = [\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'year': 2012, 'genre': 'Non-fiction', 'isbn': '978-0143124177'},\n", + " {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'year': 1998, 'genre': 'Non-fiction', 'isbn': '978-0140280197'},\n", + " {'title': 'The Art of War', 'author': 'Sun Tzu', 'year': '5th century BC', 'genre': 'Philosophy', 'isbn': '978-1590302255'}\n", + "]\n", + "\n", + "# Function to find and print information about the book titled 'Mastery' by Robert Greene\n", + "def find_book(books):\n", + " for book in books:\n", + " # Check if the title and author match\n", + " if book.get('title') == 'Mastery' and book.get('author') == 'Robert Greene':\n", + " # Print the entire dictionary if a match is found\n", + " print(\"Found book:\")\n", + " for key, value in book.items():\n", + " print(f\"{key}: {value}\")\n", + " return # Exit the function after finding the book\n", + " print(\"Book not found.\") # Optional: Print if the book is not in the list\n", + "\n", + "# Call the function\n", + "find_book(books)\n", + "\n", + "\n", + "### Explanation of the Code:\n", + "\n", + "1. **Data Structure**: The `books` variable is initialized as a list containing dictionary elements, where each dictionary represents a book.\n", + "\n", + "2. **Function Definition**: The function `find_book(books)` takes the list of books as an argument.\n", + "\n", + "3. **Iteration**: The `for` loop iterates over each book in the `books` list.\n", + "\n", + "4. **Finding the Match**: It checks if the title and author of the current book (retrieved using the `get` method to avoid `KeyError`) match 'Mastery' and 'Robert Greene'.\n", + "\n", + "5. 
**Printing Details**: If a match is found, it prints out the key-value pairs from the dictionary in a formatted manner.\n", + "\n", + "6. **Exit after Finding**: The `return` statement ensures that the function exits as soon as the book is found.\n", + "\n", + "7. **Not Found Condition**: If no book matches the criteria, it prints \"Book not found.\"\n", + "\n", + "### Conclusion\n", + "This method is efficient for small to moderately sized lists of dictionaries. If you have a very large dataset, consider using more efficient search algorithms or data structures like dictionaries for faster lookups, but the above approach should work well for typical use cases." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       "│ Getting response from llama3.2...                                                                               │\n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n", + "│ \u001b[1;32mGetting response from llama3.2...\u001b[0m │\n", + "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "## llama3.2 Response\n", + "Here's an example of how you can achieve this using Python:\n", + "\n", + "**Problem Statement**\n", + "\n", + "Given a list of dictionaries called `books`, where each dictionary represents a book with its title, author, publication year, etc., write code to find and print all information about the book titled `'Mastery'` by Robert Greene.\n", + "\n", + "**Example Input Data**\n", + "```python\n", + "books = [\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2012, 'genre': 'Self-Help'},\n", + " {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'publication_year': 2007, 'genre': 'Non-Fiction'},\n", + " {'title': 'To Kill a Mockingbird', 'author': 'Harper Lee', 'publication_year': 1960, 'genre': 'Classic Fiction'},\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2018, 'genre': 'Self-Help'} # duplicate title\n", + "]\n", + "```\n", + "**Solution**\n", + "\n", + "Here's the Python code that finds and prints all information about the book titled `'Mastery'` by Robert Greene:\n", + "```python\n", + "# Define a function to find books with a specific title and author\n", + "def find_book(books, title, author):\n", + " \"\"\"\n", + " Find all books in the list that match the given title and author.\n", + "\n", + " Args:\n", + " books (list): List of dictionaries representing books.\n", + " title (str): Title of the book to search for.\n", + " author (str): Author of the book to search for.\n", + "\n", + " Returns:\n", + " list: List of dictionaries representing the found books.\n", + " \"\"\"\n", + " return [book for book in books if book['title'] == title and book['author'] == author]\n", + "\n", + "# Define a function to print book information\n", + "def print_book_info(book):\n", + " \"\"\"\n", + " Print all information about a single book.\n", + "\n", + " Args:\n", + " book (dict): Dictionary representing the book.\n", + " \"\"\"\n", + " print(f\"Title: {book['title']}\")\n", + " print(f\"Author: {book['author']}\")\n", + " print(f\"Publication Year: {book['publication_year']}\")\n", + " print(f\"Genre: {book['genre']}\\n\")\n", + "\n", + "# Find and print information about the book titled 'Mastery' by Robert Greene\n", + "target_title = \"Mastery\"\n", + "target_author = \"Robert Greene\"\n", + "\n", + "found_books = find_book(books, target_title, target_author)\n", + "\n", + "if found_books:\n", + " for i, book in enumerate(found_books):\n", + " print(f\"Book {i+1}:\")\n", + " print_book_info(book)\n", + "else:\n", + " print(f\"No books found with title '{target_title}' by author '{target_author}'.\")\n", + "```\n", + "**Explanation**\n", + "\n", + "The solution consists of two functions:\n", + "\n", + "1. `find_book`: This function takes a list of dictionaries representing books, as well as the title and author to search for. It uses a list comprehension to find all books that match the given criteria and returns them.\n", + "2. 
`print_book_info`: This function takes a single dictionary representing a book and prints its information.\n", + "\n", + "In the example code, we define the `books` list with some sample data. We then call the `find_book` function to find all books with the title `'Mastery'` by Robert Greene. If found books are returned, we iterate over them and print their information using the `print_book_info` function.\n", + "\n", + "Note that if there are duplicate titles in the input data, only one book will be returned by the `find_book` function, as dictionaries cannot have duplicate keys." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Get responses from both models\n", + "responses = tutor.ask(question)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + "
\n", + "

gpt-4o-mini

\n", + "
To find and print all information about the book titled \"Mastery\" by Robert Greene from a list of dictionaries called `books`, we can write a function that iterates through the list, checks for the specific title and author, and then prints the information if a match is found. Here's a step-by-step explanation followed by the code.\n", + "\n", + "### Steps to Follow:\n", + "\n", + "1. **Structure of the Data**: \n", + " Each book in the `books` list is a dictionary. We need to understand how the book's information is structured. A typical dictionary might look like this:\n", + " ```python\n", + " {\n", + " 'title': 'Mastery',\n", + " 'author': 'Robert Greene',\n", + " 'year': 2012,\n", + " 'genre': 'Non-fiction',\n", + " 'isbn': '978-0143124177'\n", + " }\n", + " ```\n", + "\n", + "2. **Iterate through the List**:\n", + " We will use a loop to go through each book in the `books` list. \n", + "\n", + "3. **Check for Conditions**:\n", + " For each book (dictionary), we need to check if the 'title' is 'Mastery' and the 'author' is 'Robert Greene'. \n", + "\n", + "4. **Print the Details**: \n", + " If we find a match, we will print all the details of that book.\n", + "\n", + "### Example Code\n", + "\n", + "Here’s a Python code snippet that accomplishes this:\n", + "\n", + "```python\n", + "# Sample list of dictionaries representing books\n", + "books = [\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'year': 2012, 'genre': 'Non-fiction', 'isbn': '978-0143124177'},\n", + " {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'year': 1998, 'genre': 'Non-fiction', 'isbn': '978-0140280197'},\n", + " {'title': 'The Art of War', 'author': 'Sun Tzu', 'year': '5th century BC', 'genre': 'Philosophy', 'isbn': '978-1590302255'}\n", + "]\n", + "\n", + "# Function to find and print information about the book titled 'Mastery' by Robert Greene\n", + "def find_book(books):\n", + " for book in books:\n", + " # Check if the title and author match\n", + " if book.get('title') == 'Mastery' and book.get('author') == 'Robert Greene':\n", + " # Print the entire dictionary if a match is found\n", + " print(\"Found book:\")\n", + " for key, value in book.items():\n", + " print(f\"{key}: {value}\")\n", + " return # Exit the function after finding the book\n", + " print(\"Book not found.\") # Optional: Print if the book is not in the list\n", + "\n", + "# Call the function\n", + "find_book(books)\n", + "```\n", + "\n", + "### Explanation of the Code:\n", + "\n", + "1. **Data Structure**: The `books` variable is initialized as a list containing dictionary elements, where each dictionary represents a book.\n", + "\n", + "2. **Function Definition**: The function `find_book(books)` takes the list of books as an argument.\n", + "\n", + "3. **Iteration**: The `for` loop iterates over each book in the `books` list.\n", + "\n", + "4. **Finding the Match**: It checks if the title and author of the current book (retrieved using the `get` method to avoid `KeyError`) match 'Mastery' and 'Robert Greene'.\n", + "\n", + "5. **Printing Details**: If a match is found, it prints out the key-value pairs from the dictionary in a formatted manner.\n", + "\n", + "6. **Exit after Finding**: The `return` statement ensures that the function exits as soon as the book is found.\n", + "\n", + "7. **Not Found Condition**: If no book matches the criteria, it prints \"Book not found.\"\n", + "\n", + "### Conclusion\n", + "This method is efficient for small to moderately sized lists of dictionaries. 
If you have a very large dataset, consider using more efficient search algorithms or data structures like dictionaries for faster lookups, but the above approach should work well for typical use cases.
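The faster-lookup suggestion above can be made concrete with a minimal sketch: index the list once by `(title, author)` and then look records up in O(1). This assumes the same list-of-dicts shape used in the examples and is only an illustration, not code from the notebook itself.

```python
# Minimal sketch of the "use a dictionary for faster lookups" idea.
# Assumes the same list-of-dicts shape as the sample data above.
books = [
    {'title': 'Mastery', 'author': 'Robert Greene', 'year': 2012, 'genre': 'Non-fiction'},
    {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'year': 1998, 'genre': 'Non-fiction'},
    {'title': 'Mastery', 'author': 'Robert Greene', 'year': 2018, 'genre': 'Non-fiction'},  # duplicate title
]

# Build the index once; each (title, author) key maps to a list of records,
# so duplicate titles are preserved instead of overwritten.
books_by_key = {}
for book in books:
    books_by_key.setdefault((book['title'], book['author']), []).append(book)

# Subsequent lookups are O(1) on average.
for match in books_by_key.get(('Mastery', 'Robert Greene'), []):
    print(match)
```

Building the index costs one pass over the data, so it pays off when the same list is queried repeatedly.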
\n", + "
\n", + "
\n", + "

llama3.2

\n", + "
Here's an example of how you can achieve this using Python:\n", + "\n", + "**Problem Statement**\n", + "\n", + "Given a list of dictionaries called `books`, where each dictionary represents a book with its title, author, publication year, etc., write code to find and print all information about the book titled `'Mastery'` by Robert Greene.\n", + "\n", + "**Example Input Data**\n", + "```python\n", + "books = [\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2012, 'genre': 'Self-Help'},\n", + " {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'publication_year': 2007, 'genre': 'Non-Fiction'},\n", + " {'title': 'To Kill a Mockingbird', 'author': 'Harper Lee', 'publication_year': 1960, 'genre': 'Classic Fiction'},\n", + " {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2018, 'genre': 'Self-Help'} # duplicate title\n", + "]\n", + "```\n", + "**Solution**\n", + "\n", + "Here's the Python code that finds and prints all information about the book titled `'Mastery'` by Robert Greene:\n", + "```python\n", + "# Define a function to find books with a specific title and author\n", + "def find_book(books, title, author):\n", + " \"\"\"\n", + " Find all books in the list that match the given title and author.\n", + "\n", + " Args:\n", + " books (list): List of dictionaries representing books.\n", + " title (str): Title of the book to search for.\n", + " author (str): Author of the book to search for.\n", + "\n", + " Returns:\n", + " list: List of dictionaries representing the found books.\n", + " \"\"\"\n", + " return [book for book in books if book['title'] == title and book['author'] == author]\n", + "\n", + "# Define a function to print book information\n", + "def print_book_info(book):\n", + " \"\"\"\n", + " Print all information about a single book.\n", + "\n", + " Args:\n", + " book (dict): Dictionary representing the book.\n", + " \"\"\"\n", + " print(f\"Title: {book['title']}\")\n", + " print(f\"Author: {book['author']}\")\n", + " print(f\"Publication Year: {book['publication_year']}\")\n", + " print(f\"Genre: {book['genre']}\\n\")\n", + "\n", + "# Find and print information about the book titled 'Mastery' by Robert Greene\n", + "target_title = \"Mastery\"\n", + "target_author = \"Robert Greene\"\n", + "\n", + "found_books = find_book(books, target_title, target_author)\n", + "\n", + "if found_books:\n", + " for i, book in enumerate(found_books):\n", + " print(f\"Book {i+1}:\")\n", + " print_book_info(book)\n", + "else:\n", + " print(f\"No books found with title '{target_title}' by author '{target_author}'.\")\n", + "```\n", + "**Explanation**\n", + "\n", + "The solution consists of two functions:\n", + "\n", + "1. `find_book`: This function takes a list of dictionaries representing books, as well as the title and author to search for. It uses a list comprehension to find all books that match the given criteria and returns them.\n", + "2. `print_book_info`: This function takes a single dictionary representing a book and prints its information.\n", + "\n", + "In the example code, we define the `books` list with some sample data. We then call the `find_book` function to find all books with the title `'Mastery'` by Robert Greene. If found books are returned, we iterate over them and print their information using the `print_book_info` function.\n", + "\n", + "Note that if there are duplicate titles in the input data, only one book will be returned by the `find_book` function, as dictionaries cannot have duplicate keys.
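One detail worth checking against the sample data above: `find_book` collects its matches in a plain list, so both `'Mastery'` entries come back. A minimal check, reusing the same data and list comprehension (an added sketch, not part of the recorded response):

```python
# Same sample data and list comprehension as in the response above.
books = [
    {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2012, 'genre': 'Self-Help'},
    {'title': 'The 48 Laws of Power', 'author': 'Robert Greene', 'publication_year': 2007, 'genre': 'Non-Fiction'},
    {'title': 'To Kill a Mockingbird', 'author': 'Harper Lee', 'publication_year': 1960, 'genre': 'Classic Fiction'},
    {'title': 'Mastery', 'author': 'Robert Greene', 'publication_year': 2018, 'genre': 'Self-Help'},  # duplicate title
]

def find_book(books, title, author):
    # Every matching dict in the list is kept; nothing is keyed by title.
    return [b for b in books if b['title'] == title and b['author'] == author]

matches = find_book(books, 'Mastery', 'Robert Greene')
print(len(matches))  # 2 -- both 'Mastery' records are returned
```

The dictionary-key restriction applies to keys inside a single dict, not to elements of the surrounding list, so duplicates survive the search.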
\n", + "
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Compare responses side by side\n", + "tutor.compare_responses()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "f00c09c3-1728-442b-94f1-548255fb95b4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n",
+       "Performance Statistics:\n",
+       "
\n" + ], + "text/plain": [ + "\n", + "\u001b[1mPerformance Statistics:\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
            mean        min        max  count\n",
+       "Model                                        \n",
+       "gpt    14.672200  14.672200  14.672200      1\n",
+       "llama  79.891858  79.891858  79.891858      1\n",
+       "
\n" + ], + "text/plain": [ + " mean min max count\n", + "Model \n", + "gpt \u001b[1;36m14.672200\u001b[0m \u001b[1;36m14.672200\u001b[0m \u001b[1;36m14.672200\u001b[0m \u001b[1;36m1\u001b[0m\n", + "llama \u001b[1;36m79.891858\u001b[0m \u001b[1;36m79.891858\u001b[0m \u001b[1;36m79.891858\u001b[0m \u001b[1;36m1\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAI/CAYAAABqNbq7AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAY3dJREFUeJzt3XtYFHX///HXqrhyVjywUKiIqJma5wNWagbmqbzN7rvsIFndlpoRFUre1WoGZqlklnYwxcos89DhLoPKKFPvUDO9yUxvj5lIeQJFEXV+f/hjvq6ggTIuK8/Hde11OZ/PZ2bes+sy+9qZnbEZhmEIAAAAAACUuyruLgAAAAAAgMsVoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGxdlzpw5stls5qNatWoKCQnR7bffrs2bN7u7PI8TGxvr8nye6xEbG6tvvvlGNptN33zzjbvLNnXv3t2ssUqVKvL391fjxo1122236cMPP9SpU6eKzdOwYUPFxsaWaT0rVqyQ0+nUwYMHyzTf2esqeg4//PDDMi3nfPLz8+V0Okt8XYreL9u3by+39QFARTRt2jTZbDa1aNHC3aVUOGfuK202m2rUqKHmzZtrwoQJOn78uLvL8zil+dxU9HkpNjZWDRs2dHfJqISqubsAXB5mz56tZs2a6dixY/r+++/13HPPadmyZfrll19Uq1Ytd5fnMZ566ik9+OCD5vTatWs1YsQIJSUlqUePHmZ73bp1VbduXa1cuVLNmzd3R6nn1KhRI7377ruSpCNHjmjbtm1asmSJbrvtNl133XX65JNPFBgYaI5fvHixAgICyrSOFStWaNy4cYqNjVXNmjVLPd+FrKus8vPzNW7cOEmnP1idqW/fvlq5cqVCQkIsrQEA3O2tt96SJGVlZek///mPOnXq5OaKKpYz95V//PGH3nzzTT311FPauXOnXn/9dTdX51lWrlzpMv3ss89q2bJl+vrrr13amzdvrrCwMD3yyCOXsjxAEqEb5aRFixZq3769pNNB4+TJk3rmmWe0ZMkS3XvvvW6uznNEREQoIiLCnD527JgkKTIyUp07dy42vqQ2d/P29i5W1/3336/Zs2dr6NCh+uc//6n333/f7GvTpo3lNR09elTe3t6XZF3nU/RlCQBczlavXq2ffvpJffv21b///W/NmjXrkoduwzB07NgxeXt7X9L1ltbZ+8revXurefPmSk1N1bRp01SjRg03VudZzv7MUbduXVWpUqXEz0hWf/EOnAunl8MSRQF87969Lu2rV6/WzTffrKCgINWoUUNt2rTRBx984DImPz9fjz/+uMLDw1WjRg0FBQWpffv2eu+998wxsbGx8vPzU1ZWlnr27ClfX1/VrVtXI0eOVH5+vsvyjh07psTERIWHh6t69eq64oorNGLEiGKnJjds2FD9+vXT0qVL1bZtW3l7e6tZs2bmt/Vlqa+023oxSjq9vOh5+eWXX9SrVy/5+voqJCREEydOlCStWrVK1157rXx9fdWkSROlpqYWW252draGDRumK6+8UtWrV1d4eLjGjRunEydOXFS99957r/r06aMFCxZox44dZvvZp3yfOnVKEyZMUNOmTeXt7a2aNWuqVatWeumllyRJTqdTTzzxhCQpPDzc5bSxouX169dPixYtUps2bVSjRg3zyPO5TmU/duyY4uPj5XA45O3trW7duunHH390GdO9e/diR64luZyqtn37djNUjxs3zuXnANK5Ty9/6623dM0115j/n/72t79p48aNxdbj5+enLVu2qE+fPvLz81NYWJgee+wxFRQUnPN5B4BLbdasWZKkiRMnKioqSvPnzzf3zYWFhapXr57uvvvuYvMdPHhQ3t7eio+PN9tyc3PNfW7RPjwuLk5Hjhxxmddms2nkyJGaOXOmrrrqKtntdnMfN27cOHXq1ElBQUEKCAhQ27ZtNWvWLBmG4bKMgoICPfbYY3I4HPLx8dH111+vNWvWlLjvKO99ZbVq1dS6dWsdP37c5fOJYRh69dVX1bp1a3l7e6tWrVoaNGiQtm7d6jL/jz/+qH79+qlevXqy2+0KDQ1V37599dtvvxV7jl577TU1adJEdrtdzZs31/z584vV89///le33HKLatWqpRo1aqh169bFPjMUfQ557733NHbsWIWGhiogIEA33nijNm3aVOb6SrutF6Ok08uLnpfZs2ebnz3at2+vVatWyTAMvfDCCwoPD5efn59uuOEGbdmypdhyv/zyS/Xs2VMBAQHy8fFR165d9dVXX5Vb3fB8HOmGJbZt2yZJatKkidm2bNky3XTTTerUqZNmzpypwMBAzZ8/X//4xz+Un59v7tDi4+P19ttva8KECWrTpo2OHDmi//73v9q3b5/LOgoLC9WnTx8NGzZMY8aM0YoVKzRhwgTt2LFDn3zyiaTTf8AHDBigr776SomJibruuuu0fv16PfPMM1q5cqVWrlwpu91uLvOnn37SY489pjFjxig4OFhvvvmm7rvvPjVu3FjXX399qesr7bZaobCwUAMHDtSDDz6oJ554QvPmzVNiYqJyc3O1cOFCjR49WldeeaVefvllxcbGqkWLFmrXrp2k0x8iOnbsqCpVqujpp59WRESEVq5cqQkTJmj79u2aPXv2RdV2880367PPPtN3332nBg0alDhm0qRJcjqd+te//qXrr79ehYWF+uWXX8wPIffff7/279+vl19+WYsWLTJP1T7zNPu1a9dq48aN+te//qXw8HD5+vqet64nn3xSbdu21ZtvvqlDhw7J6XSqe/fu+vHHH9WoUaNSb19ISIiWLl2qm266Sffdd5/uv/9+STrv0e3k5GQ9+eSTuuOOO5ScnKx9+/bJ6XSqS5cuyszMVGRkpDm2sLBQN998s+677z499thj+vbbb/Xss88qMDBQTz/9dKnrBACrHD16VO+99546dOigFi1aaOjQobr//vu1YMECDRkyRF5eXrrrrrs0c+ZMvfLKKy5HHt977z0dO3bMPEMuPz9f3bp102+//aYnn3xSrVq1UlZWlp5++mlt2LBBX375pWw2mzn/kiVL9N133+np
p5+Ww+FQvXr1JJ3+QnTYsGGqX7++pNNfQD/88MPavXu3y9/Oe++9V++//74SEhJ0ww036Oeff9bf/vY35ebmumyjVfvKbdu2qWbNmi77jGHDhmnOnDkaNWqUnn/+ee3fv1/jx49XVFSUfvrpJwUHB+vIkSOKjo5WeHi4XnnlFQUHBys7O1vLli1TXl6eyzo+/vhjLVu2TOPHj5evr69effVV3XHHHapWrZoGDRokSdq0aZOioqJUr149TZs2TbVr19Y777yj2NhY7d27VwkJCS7LfPLJJ9W1a1e9+eabys3N1ejRo9W/f39t3LhRVatWLXV9pdlWq3z66af68ccfNXHiRNlsNo0ePVp9+/bVkCFDtHXrVk2fPl2HDh1SfHy8br31Vq1bt878v/fOO+/onnvu0S233KLU1FR5eXnptddeU69evfTFF1+oZ8+eltUND2IAF2H27NmGJGPVqlVGYWGhkZeXZyxdutRwOBzG9ddfbxQWFppjmzVrZrRp08alzTAMo1+/fkZISIhx8uRJwzAMo0WLFsaAAQPOu94hQ4YYkoyXXnrJpf25554zJBnLly83DMMwli5dakgyJk2a5DLu/fffNyQZr7/+utnWoEEDo0aNGsaOHTvMtqNHjxpBQUHGsGHDzLbS1Ffabf0ry5YtMyQZCxYsOGffsmXLzLai52XhwoVmW2FhoVG3bl1DkrF27Vqzfd++fUbVqlWN+Ph4s23YsGGGn5+fy3NgGIbx4osvGpKMrKys89bbrVs34+qrrz5n/+eff25IMp5//nmzrUGDBsaQIUPM6X79+hmtW7c+73peeOEFQ5Kxbdu2Yn0NGjQwqlatamzatKnEvjPXVfQctm3b1jh16pTZvn37dsPLy8u4//77XbatW7duxZY5ZMgQo0GDBub0H3/8YUgynnnmmWJji94vRXUfOHDA8Pb2Nvr06eMybufOnYbdbjcGDx7ssh5JxgcffOAytk+fPkbTpk2LrQsA3GHu3LmGJGPmzJmGYRhGXl6e4efnZ1x33XXmmPXr1xfbBxuGYXTs2NFo166dOZ2cnGxUqVLFyMzMdBn34YcfGpKMzz77zGyTZAQGBhr79+8/b30nT540CgsLjfHjxxu1a9c2//ZnZWUZkozRo0e7jH/vvfcMSS77jvLaVxYWFhqFhYXGnj17jKefftrleTMMw1i5cqUhyZg8ebLL/Lt27TK8vb2NhIQEwzAMY/Xq1YYkY8mSJeddryTD29vbyM7ONttOnDhhNGvWzGjcuLHZdvvttxt2u93YuXOny/y9e/c2fHx8jIMHDxqG8X/70LP3YR988IEhyVi5cmWp6yvttpbGkCFDDF9f33P2nbnPNozTz4vD4TAOHz5sti1ZssSQZLRu3drl80FKSoohyVi/fr1hGIZx5MgRIygoyOjfv7/LMk+ePGlcc801RseOHUtdNy5vnF6OctG5c2d5eXnJ399fN910k2rVqqWPPvpI1aqdPpliy5Yt+uWXX3TnnXdKkk6cOGE++vTpoz179pinInXs2FGff/65xowZo2+++UZHjx4953qLlldk8ODBkk4faZZkXkTj7CPLt912m3x9fYud+tO6dWvzm3BJqlGjhpo0aeJyOvRf1VeWbbWCzWZTnz59zOlq1aqpcePGCgkJcflNc1BQkOrVq+eybZ9++ql69Oih0NBQl7p79+4tScrIyLio2oyzTuUrSceOHfXTTz9p+PDh+uKLL4odYSiNVq1auZxl8VcGDx7scrSkQYMGioqKMv8fWWXlypU6evRosf+fYWFhuuGGG4r9/7TZbOrfv79LW6tWrVxeQwBwp1mzZsnb21u33367JMnPz0+33XabvvvuO/OuJi1btlS7du1cjghv3LhRP/zwg4YOHWq2ffrpp2rRooVat27tsk/q1atXiXfvuOGGG0q8eOvXX3+tG2+8UYGBgapataq8vLz09NNPa9++fcrJyZH0f/u3v//97y7zDho0yPwsc2ZdF7uvzMrKkpeXl7y8vBQSEqLx48crMTFRw4YNc1mPzWbTXXfd5bIeh8Oha665xtz+xo0bq1atWho9erRmzpypn3/++Zzr7dmzp8sR46pVq+of//iHtmzZYp7q/fXXX6tnz54KCwtzmTc2Nlb5+fnFLlx28803u0y3atVKksx9U2nqK+22WqVHjx4uZ8VdddVVkk7/1v7MzwdF7UXbtmLFCu3fv19DhgxxqfvUqVO66aablJmZWeynEKicCN0oF3PnzlVmZqa+/vprDRs2TBs3btQdd9xh9hf9tvvxxx83dzJFj+HDh0uS/vzzT0mnbzMyevRoLVmyRD169FBQUJAGDBhQ7BZk1apVU+3atV3aHA6HJJmneu/bt0/VqlUrdnqvzWaTw+Eodsr62cuTJLvd7hKs/6q+smyrFXx8fIpdgKV69eoKCgoqNrZ69ermxdqKav/kk0+K1X311VeXS91FO6nQ0NBzjklMTNSLL76oVatWqXfv3qpdu7Z69uyp1atXl3o9Zb06eNH/m7Pbzv7/Ud6Kll9SvaGhocXWX9Jra7fbXV5DAHCXLVu26Ntvv1Xfvn1lGIYOHjyogwcPmqctn3mNlKFDh2rlypX65ZdfJJ2+C4rdbi/22WH9+vXF9kn+/v4yDKPYPqmkv6U//PCDYmJiJElvvPGGvv/+e2VmZmrs2LGSZO7fi/7enn0Kc0mfNcpjXxkREaHMzEz98MMPWrBgga655holJye7/L567969MgxDwcHBxda1atUqcz2BgYHKyMhQ69at9eSTT+rqq69WaGionnnmGRUWFrqs91z7uzOfg3379p1zv3TmuCJnPz9FP9srem5LU19pt9UqZ39Gql69+nnbi/a7RZ/5Bg0aVKzu559/XoZhaP/+/ZbWDs/Ab7pRLq666irz4mk9evTQyZMn9eabb+rDDz/UoEGDVKdOHUmnA9XAgQNLXEbTpk0lSb6+vho3bpzGjRunvXv3mkeV+/fvb+6cpdNHkPft2+fyxz47O1vS/+0AateurRMnTuiPP/5wCd6GYSg7O1sdOnQo87b+VX1l2daKpk6dOmrVqpWee+65EvvPF5ZL4+OPP5bNZjN/H1+SatWqKT4+XvHx8Tp48KC+/PJLPfnkk+rVq5d27dolHx+fv1zPmd9Kl0bR/5uz2878v1WjRg0dOnSo2LiL+SBQtPw9e/YU6/v999/N/0sA4AneeustGYahDz/8UB9++GGx/tTUVE2YMEFVq1bVHXfcofj4eM2ZM0fPPfec3n77bQ0YMMDlSHWdOnXk7e1d7IKmZ/afqaS//fPnz5eXl5c+/fRTly8tlyxZ4jKu6O/x3r17dcUVV5jtRZ81zl7vxe4ra9SoYX5u6tChg3r06KGrr75acXFx6tevn/z8/FSnTh3ZbDZ99913LtefKXJmW8uWLTV//nwZhqH169drzpw5Gj9+vLy9vTVmzBhz3Ln2d2c+B7Vr1z7nfqlo+8vqr+ory7ZWJEXPxcsvv3zOO8pY+Vt0eA5CNywxadIkLVy4UE8//bQGDhyopk2bKjIyUj/99JOSkpJ
KvZzg4GDFxsbqp59+UkpKivLz811C17vvvqtRo0aZ0/PmzZP0f/dH7tmzpyZNmqR33nlHjz76qDlu4cKFOnLkyEVf3KKk+i50WyuCfv366bPPPlNERES531999uzZ+vzzzzV48GCXU/jPp2bNmho0aJB2796tuLg4bd++Xc2bNy/2LfrFeu+99xQfH29+YNuxY4dWrFihe+65xxzTsGFDLViwQAUFBeb69+3bpxUrVrhcCKgstXXp0kXe3t565513dNttt5ntv/32m77++mvz6BAAVHQnT55UamqqIiIi9Oabbxbr//TTTzV58mR9/vnn6tevn2rVqqUBAwZo7ty56tKli7Kzs11OLZdO75OSkpJUu3ZthYeHX1BdNptN1apVU9WqVc22o0eP6u2333YZV/Rl8Pvvv6+2bdua7R9++GGxK5Jbsa+sXbu2Jk6cqHvvvVcvv/yyEhMT1a9fP02cOFG7d+8udtr7udhsNl1zzTWaOnWq5syZo7Vr17r0f/XVV9q7d68ZBE+ePKn3339fERERuvLKKyWd/uy0ePFi/f777y5fIMydO1c+Pj4XdbvSc9V3IdtaEXTt2lU1a9bUzz//rJEjR7q7HFRghG5YolatWkpMTFRCQoLmzZunu+66S6+99pp69+6tXr16KTY2VldccYX279+vjRs3au3atVqwYIEkqVOnTurXr59atWqlWrVqaePGjXr77bfVpUsXl8BdvXp1TZ48WYcPH1aHDh3Mq5f37t1b1157rSQpOjpavXr10ujRo5Wbm6uuXbuaVy9v06ZNibcs+Sulqa+021rRjB8/Xunp6YqKitKoUaPUtGlTHTt2TNu3b9dnn32mmTNnmjvlczl69KhWrVpl/nvr1q1asmSJPv30U3Xr1k0zZ8487/z9+/c37/tet25d7dixQykpKWrQoIF5Je+WLVtKkl566SXzarhNmzaVv7//BW13Tk6O/va3v+mBBx7QoUOH9Mwzz6hGjRpKTEw0x9x999167bXXdNddd+mBBx7Qvn37NGnSpGL3/PT391eDBg300UcfqWfPngoKClKdOnWK3aJEOv2lwlNPPaUnn3xS99xzj+644w7t27dP48aNU40aNfTMM89c0PYAwKX2+eef6/fff9fzzz9f4u0VW7RooenTp2vWrFnq16+fpNOnmL///vsaOXKkrrzySt14440u88TFxWnhwoW6/vrr9eijj6pVq1Y6deqUdu7cqbS0ND322GN/ef/vvn37asqUKRo8eLD++c9/at++fXrxxReLHTm9+uqrdccdd2jy5MmqWrWqbrjhBmVlZWny5MkKDAxUlSr/94vM8thXluSee+7RlClT9OKLL2rEiBHq2rWr/vnPf+ree+/V6tWrdf3118vX11d79uzR8uXL1bJlSz300EP69NNP9eqrr2rAgAFq1KiRDMPQokWLdPDgQUVHR7uso06dOrrhhhv01FNPmVcv/+WXX1xOa3/mmWfM360//fTTCgoK0rvvvqt///vfmjRpkgIDA8u0XaWpr7TbWtH4+fnp5Zdf1pAhQ7R//34NGjRI9erV0x9//KGffvpJf/zxh2bMmOHuMlERuOf6bbhcFF2N+ewrixrG6St/169f34iMjDROnDhhGIZh/PTTT8bf//53o169eoaXl5fhcDiMG264weVqnWPGjDHat29v1KpVy7Db7UajRo2MRx991Pjzzz/NMUVXply/fr3RvXt3w9vb2wgKCjIeeughl6tPFtUxevRoo0GDBoaXl5cREhJiPPTQQ8aBAwdcxjVo0MDo27dvse04+6rVpamvtNv6Vy7k6uUlXbHzXFcVL2mb//jjD2PUqFFGeHi44eXlZQQFBRnt2rUzxo4dW+y5LWk9ksyHr6+v0ahRI2PQoEHGggULSrxq+9lXFJ88ebIRFRVl1KlTx6hevbpRv35947777jO2b9/uMl9iYqIRGhpqVKlSxeV5ONfrWNK6ip7Dt99+2xg1apRRt25dw263G9ddd52xevXqYvOnpqYaV111lVGjRg2jefPmxvvvv1/ilVC//PJLo02bNobdbne56u3ZVy8v8uabbxqtWrUyqlevbgQGBhq33HJLsavfnuu1feaZZwz+lANwtwEDBhjVq1c3cnJyzjnm9ttvN6pVq2ZePfvkyZNGWFiYIckYO3ZsifMcPnzY+Ne//mU0bdrU/BvZsmVL49FHH3W5CrckY8SIESUu46233jKaNm1q7rOTk5ONWbNmFft7fOzYMSM+Pt6oV6+eUaNGDaNz587GypUrjcDAQOPRRx91WebF7ivPdaePf//734YkY9y4cS71d+rUyfD19TW8vb2NiIgI45577jH3U7/88otxxx13GBEREYa3t7cRGBhodOzY0ZgzZ47Lsoueo1dffdWIiIgwvLy8jGbNmhnvvvtusTo2bNhg9O/f3wgMDDSqV69uXHPNNcbs2bNdxpzrM8q2bdsMSeb40tZXmm0tjQu5evnZ/3eKtuGFF14o1TZnZGQYffv2NYKCggwvLy/jiiuuMPr27Vvi5zdUTjbDKMXlhIEKJjY2Vh9++KEOHz7s7lIAAMBlasWKFerataveffdd8w4pnspms2nEiBGaPn26u0sBKh1OLwcAAECll56erpUrV6pdu3by9vbWTz/9pIkTJyoyMvKcF0YFgNIgdAMAAKDSCwgIUFpamlJSUpSXl6c6deqod+/eSk5OLna7RgAoC04vBwAAAADAIlX+eggAAAAAALgQhG4AAAAAACxC6AYAAAAAwCIV7kJqp06d0u+//y5/f3/ZbDZ3lwMAQLkxDEN5eXkKDQ1VlSqV53tv9u0AgMtRaffrFS50//777woLC3N3GQAAWGbXrl268sor3V3GJcO+HQBwOfur/XqFC93+/v6SThceEBDg5mpQFoWFhUpLS1NMTIy8vLzcXQ5wWeP95plyc3MVFhZm7usqC/btAIDLUWn36xUudBeddhYQEMCO2cMUFhbKx8dHAQEBhADAYrzfPFtlO8WafTsA4HL2V/v1yvODMgAAAAAALjFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYJEyhe4TJ07oX//6l8LDw+Xt7a1GjRpp/PjxOnXqlDnGMAw5nU6FhobK29tb3bt3V1ZWVrkXDgAAAABARVem0P38889r5syZmj59ujZu3KhJkybphRde0Msvv2yOmTRpkqZMmaLp06crMzNTDodD0dHRysvLK/fiAQDAhePLdAAArFem0L1y5Urdcsst6tu3rxo2bKhBgwYpJiZGq1evlnR6x5ySkqKxY8dq4MCBatGihVJTU5Wfn6958+ZZsgEAAODC8GU6AADWK1Povvbaa/XVV1/p119/lST99NNPWr58ufr06SNJ2rZtm7KzsxUTE2POY7fb1a1bN61YsaIcywYAABeLL9MBALBetbIMHj16tA4dOqRmzZqpatWqOn
nypJ577jndcccdkqTs7GxJUnBwsMt8wcHB2rFjR4nLLCgoUEFBgTmdm5srSSosLFRhYWFZyoObFb1evG6A9Xi/eaaK9npde+21mjlzpn799Vc1adLE/DI9JSVF0l9/mT5s2DA3VQ4AgOcoU+h+//339c4772jevHm6+uqrtW7dOsXFxSk0NFRDhgwxx9lsNpf5DMMo1lYkOTlZ48aNK9aelpYmHx+fspSHCiI9Pd3dJQAV2uFCKXN/ngpsJZ+eeyz/iHZv3VSqZb3y9afn7b+iUVPV8PEtsS+4ur/a1PQv1XpQPvLz891dggsrvkyXzv2FOgAAlVGZQvcTTzyhMWPG6Pbbb5cktWzZUjt27FBycrKGDBkih8Mh6fROOiQkxJwvJyen2A67SGJiouLj483p3NxchYWFKSYmRgEBAWXeILhPYWGh0tPTFR0dLS8vL3eXA1RYH6z+TZ9tf0n2ul+VPKC6pLbls649WnXOvoI/eur2aKci6pYcylH+Klr4tOLLdOncX6gDAFAZlSl05+fnq0oV15+BV61a1bzKaXh4uBwOh9LT09WmTRtJ0vHjx5WRkaHnn3++xGXa7XbZ7fZi7V5eXgQ3D8VrB5xf71ZXKP9UrA6fuLnE/iOHc7V5w5rzLsM4ZSh77145goNlq3Lu8BPZsp18/Ur+ArNphyvULLRmqevGxatofxut+DJdOvcX6gBwsTq8P8jdJeACZP7jQ3eX4FZlCt39+/fXc889p/r16+vqq6/Wjz/+qClTpmjo0KGSTn8THhcXp6SkJEVGRioyMlJJSUny8fHR4MGDLdkAAPA0Qb7V9UDX1ucf1HfAebsLCwv12WefqU+fPhUuyMFzWPFlunTuL9QBAKiMyhS6X375ZT311FMaPny4cnJyFBoaqmHDhunpp582xyQkJOjo0aMaPny4Dhw4oE6dOiktLU3+/vxuEACAioQv0wEAsF6ZQre/v79SUlLMq5qWxGazyel0yul0XmRpAADASnyZDgCA9coUugEAwOWDL9MBALBelb8eAgAAAAAALgShGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAIBKqmHDhrLZbMUeI0aMkCQZhiGn06nQ0FB5e3ure/fuysrKcnPVAAB4FkI3AACVVGZmpvbs2WM+0tPTJUm33XabJGnSpEmaMmWKpk+frszMTDkcDkVHRysvL8+dZQMA4FEI3QAAVFJ169aVw+EwH59++qkiIiLUrVs3GYahlJQUjR07VgMHDlSLFi2Umpqq/Px8zZs3z92lAwDgMQjdAABAx48f1zvvvKOhQ4fKZrNp27Ztys7OVkxMjDnGbrerW7duWrFihRsrBQDAs1RzdwEAAMD9lixZooMHDyo2NlaSlJ2dLUkKDg52GRccHKwdO3acd1kFBQUqKCgwp3Nzc8u3WAAAPAhHugEAgGbNmqXevXsrNDTUpd1ms7lMG4ZRrO1sycnJCgwMNB9hYWHlXi8AAJ6C0A0AQCW3Y8cOffnll7r//vvNNofDIen/jngXycnJKXb0+2yJiYk6dOiQ+di1a1f5Fw0AgIcgdAMAUMnNnj1b9erVU9++fc228PBwORwO84rm0unffWdkZCgqKuq8y7Pb7QoICHB5AABQWZUpdHM/TwAALi+nTp3S7NmzNWTIEFWr9n+XerHZbIqLi1NSUpIWL16s//73v4qNjZWPj48GDx7sxooBAPAsZbqQWmZmpk6ePGlO//e//1V0dHSx+3nOmTNHTZo00YQJExQdHa1NmzbJ39+/fCsHAAAX7csvv9TOnTs1dOjQYn0JCQk6evSohg8frgMHDqhTp05KS0tjnw4AQBmUKXTXrVvXZXrixInnvJ+nJKWmpio4OFjz5s3TsGHDyq9qAABQLmJiYmQYRol9NptNTqdTTqfz0hYFAMBl5IJ/0839PAEAAAAAOL8Lvk93ed3P81z38iwsLFRhYeGFlgc3KHq9eN0A6/F+80y8XgAAVD4XHLrL636eycnJGjduXLH2tLQ0+fj4XGh5cKMzr3QLwFq83zxLfn6+u0sAAACX2AWF7qL7eS5atMhsO/N+niEhIWb7X93PMzExUfHx8eZ0bm6uwsLCFBMTwy1GPExhYaHS09MVHR0tLy8vd5cDXNZ4v3mmorO5AABA5XFBofuv7ufZpk0bSf93P8/nn3/+nMuy2+2y2+3F2r28vPgg6aF47YBLh/ebZ+G1AgCg8ilz6C7N/TwjIyMVGRmppKQk7ucJAAAAAKi0yhy6uZ8nAAAAAAClU+bQzf08AQAAAAAonQu+TzcAAAAAADg/QjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAJXY7t27ddddd6l27dry8fFR69attWbNGrPfMAw5nU6FhobK29tb3bt3V1ZWlhsrBgDAsxC6AQCopA4cOKCuXbvKy8tLn3/+uX7++WdNnjxZNWvWNMdMmjRJU6ZM0fTp05WZmSmHw6Ho6Gjl5eW5r3AAADxINXcXAAAA3OP5559XWFiYZs+ebbY1bNjQ/LdhGEpJSdHYsWM1cOBASVJqaqqCg4M1b948DRs27FKXDACAx+FINwAAldTHH3+s9u3b67bbblO9evXUpk0bvfHGG2b/tm3blJ2drZiYGLPNbrerW7duWrFixTmXW1BQoNzcXJcHAACVFaEbAIBKauvWrZoxY4YiIyP1xRdf6MEHH9SoUaM0d+5cS
VJ2drYkKTg42GW+4OBgs68kycnJCgwMNB9hYWHWbQQAABUcoRsAgErq1KlTatu2rZKSktSmTRsNGzZMDzzwgGbMmOEyzmazuUwbhlGs7UyJiYk6dOiQ+di1a5cl9QMA4AkI3QAAVFIhISFq3ry5S9tVV12lnTt3SpIcDockFTuqnZOTU+zo95nsdrsCAgJcHgAAVFaEbgAAKqmuXbtq06ZNLm2//vqrGjRoIEkKDw+Xw+FQenq62X/8+HFlZGQoKirqktYKAICn4urlAABUUo8++qiioqKUlJSkv//97/rhhx/0+uuv6/XXX5d0+rTyuLg4JSUlKTIyUpGRkUpKSpKPj48GDx7s5uoBAPAMhG4AACqpDh06aPHixUpMTNT48eMVHh6ulJQU3XnnneaYhIQEHT16VMOHD9eBAwfUqVMnpaWlyd/f342VAwDgOQjdAABUYv369VO/fv3O2W+z2eR0OuV0Oi9dUQAAXEb4TTcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYpc+jevXu37rrrLtWuXVs+Pj5q3bq11qxZY/YbhiGn06nQ0FB5e3ure/fuysrKKteiAQAAAADwBGUK3QcOHFDXrl3l5eWlzz//XD///LMmT56smjVrmmMmTZqkKVOmaPr06crMzJTD4VB0dLTy8vLKu3YAAAAAACq0amUZ/PzzzyssLEyzZ8822xo2bGj+2zAMpaSkaOzYsRo4cKAkKTU1VcHBwZo3b56GDRtWPlUDAAAAAOAByhS6P/74Y/Xq1Uu33XabMjIydMUVV2j48OF64IEHJEnbtm1Tdna2YmJizHnsdru6deumFStWlBi6CwoKVFBQYE7n5uZKkgoLC1VYWHhBGwX3KHq9eN0A6/F+80y8XgAAVD5lCt1bt27VjBkzFB8fryeffFI//PCDRo0aJbvdrnvuuUfZ2dmSpODgYJf5goODtWPHjhKXmZycrHHjxhVrT0tLk4+PT1nKQwWRnp7u7hKASoP3m2fJz893dwkAAOASK1PoPnXqlNq3b6+kpCRJUps2bZSVlaUZM2bonnvuMcfZbDaX+QzDKNZWJDExUfHx8eZ0bm6uwsLCFBMTo4CAgLKUBzcrLCxUenq6oqOj5eXl5e5ygMsa7zfPVHQ2FwAAqDzKFLpDQkLUvHlzl7arrrpKCxculCQ5HA5JUnZ2tkJCQswxOTk5xY5+F7Hb7bLb7cXavby8+CDpoXjtgEuH95tn4bUCAKDyKdPVy7t27apNmza5tP36669q0KCBJCk8PFwOh8PldMfjx48rIyNDUVFR5VAuAAAAAACeo0xHuh999FFFRUUpKSlJf//73/XDDz/o9ddf1+uvvy7p9GnlcXFxSkpKUmRkpCIjI5WUlCQfHx8NHjzYkg0AAAAAAKCiKlPo7tChgxYvXqzExESNHz9e4eHhSklJ0Z133mmOSUhI0NGjRzV8+HAdOHBAnTp1Ulpamvz9/cu9eAAAAAAAKrIyhW5J6tevn/r163fOfpvNJqfTKafTeTF1AQAAAADg8cr0m24AAAAAAFB6hG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAoJJyOp2y2WwuD4fDYfYbhiGn06nQ0FB5e3ure/fuysrKcmPFAAB4HkI3AACV2NVXX609e/aYjw0bNph9kyZN0pQpUzR9+nRlZmbK4XAoOjpaeXl5bqwYAADPQugGAKASq1atmhwOh/moW7eupNNHuVNSUjR27FgNHDhQLVq0UGpqqvLz8zVv3jw3Vw0AgOcgdAMAUIlt3rxZoaGhCg8P1+23366tW7dKkrZt26bs7GzFxMSYY+12u7p166YVK1a4q1wAADxONXcXAAAA3KNTp06aO3eumjRpor1792rChAmKiopSVlaWsrOzJUnBwcEu8wQHB2vHjh3nXW5BQYEKCgrM6dzc3PIvHgAAD0HoBgCgkurdu7f575YtW6pLly6KiIhQamqqOnfuLEmy2Wwu8xiGUaztbMnJyRo3blz5FwwAgAfi9HIAACBJ8vX1VcuWLbV582bzKuZFR7yL5OTkFDv6fbbExEQdOnTIfOzatcuymgEAqOgI3QAAQNLp08I3btyokJAQhYeHy+FwKD093ew/fvy4MjIyFBUVdd7l2O12BQQEuDwAAKisOL0cAIBK6vHHH1f//v1Vv3595eTkaMKECcrNzdWQIUNks9kUFxenpKQkRUZGKjIyUklJSfLx8dHgwYPdXToAAB6D0A0AQCX122+/6Y477tCff/6punXrqnPnzlq1apUaNGggSUpISNDRo0c1fPhwHThwQJ06dVJaWpr8/f3dXDkAAJ6D0A0AQCU1f/788/bbbDY5nU45nc5LUxAAAJchftMNAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAIAHOnr0qPLz883pHTt2KCUlRWlpaW6sCgAAnI3QDQCAB7rllls0d+5cSdLBgwfVqVMnTZ48WbfccotmzJjh5uoAAEARQjcAAB5o7dq1uu666yRJH374oYKDg7Vjxw7NnTtX06ZNc3N1AACgCKEbAAAPlJ+fL39/f0lSWlqaBg4cqCpVqqhz587asWOHm6sDAABFCN0AAHigxo0ba8mSJdq1a5e++OILxcTESJJycnIUEBDg5uoAAECRMoVup9Mpm83m8nA4HGa/YRhyOp0KDQ2Vt7e3unfvrqysrHIvGgCAyu7pp5/W448/roYNG6pTp07q0qWLpNNHvdu0aePm6gAAQJEyH+m++uqrtWfPHvOxYcMGs2/SpEmaMmWKpk+frszMTDkcDkVHRysvL69ciwYAoLIbNGiQdu7cqdWrV2vp0qVme8+ePTV16lQ3VgYAAM5UrcwzVKvmcnS7iGEYSklJ0dixYzVw4EBJUmpqqoKDgzVv3jwNGzbs4qsFAAAmh8NRbJ/csWNHN1UDAABKUubQvXnzZoWGhsput6tTp05KSkpSo0aNtG3bNmVnZ5u/KZMku92ubt26acWKFecM3QUFBSooKDCnc3NzJUmFhYUqLCwsa3lwo6LXi9cNsB7vN890sa9X0ZfapbFo0aKLWhcAACgfZQrdnTp10ty5c9WkSRPt3btXEyZMUFRUlLKyspSdnS1JCg4Odpmn6BYm55KcnKxx48YVa09LS5OPj09ZykMFkZ6e
7u4SgEqD95tnyc/Pv6j5AwMDzX8bhqHFixcrMDBQ7du3lyStWbNGBw8eLFM4BwAA1ipT6O7du7f575YtW6pLly6KiIhQamqqOnfuLEmy2Wwu8xiGUaztTImJiYqPjzenc3NzFRYWppiYGK6+6mEKCwuVnp6u6OhoeXl5ubsc4LLG+80zFZ3NdaFmz55t/nv06NH6+9//rpkzZ6pq1aqSpJMnT2r48OHsPwEAqEDKfHr5mXx9fdWyZUtt3rxZAwYMkCRlZ2crJCTEHJOTk1Ps6PeZ7Ha77HZ7sXYvLy8+SHooXjvg0uH95lnK87V66623tHz5cjNwS1LVqlUVHx+vqKgovfDCC+W2LgAAcOEu6j7dBQUF2rhxo0JCQhQeHi6Hw+FyquPx48eVkZGhqKioiy4UAAD8nxMnTmjjxo3F2jdu3KhTp065oSIAAFCSMh3pfvzxx9W/f3/Vr19fOTk5mjBhgnJzczVkyBDZbDbFxcUpKSlJkZGRioyMVFJSknx8fDR48GCr6gcAoFK69957NXToUG3ZssX8ideqVas0ceJE3XvvvW6uDgAAFClT6P7tt990xx136M8//1TdunXVuXNnrVq1Sg0aNJAkJSQk6OjRoxo+fLgOHDigTp06KS0tTf7+/pYUDwBAZfXiiy/K4XBo6tSp2rNnjyQpJCRECQkJeuyxx9xcHQAAKFKm0D1//vzz9ttsNjmdTjmdzoupCQAA/IUqVaooISFBCQkJ5gXauIAaAAAVz0VdSA0AALgfYRsAgIrroi6kBgAA3GPv3r26++67FRoaqmrVqqlq1aouDwAAUDFwpBsAAA8UGxurnTt36qmnnlJISIhsNpu7SwIAACUgdAMA4IGWL1+u7777Tq1bt3Z3KQAA4Dw4vRwAAA8UFhYmwzDcXQYAAPgLhG4AADxQSkqKxowZo+3bt7u7FAAAcB6cXg4AgAf6xz/+ofz8fEVERMjHx0deXl4u/fv373dTZQAA4EyEbgAAPFBKSoq7SwAAAKVA6AYAwAMNGTLE3SUAAIBSIHQDAOChTp48qSVLlmjjxo2y2Wxq3ry5br75Zu7TDQBABULoBgDAA23ZskV9+vTR7t271bRpUxmGoV9//VVhYWH697//rYiICHeXCAAAxNXLAQDwSKNGjVJERIR27dqltWvX6scff9TOnTsVHh6uUaNGubs8AADw/3GkGwAAD5SRkaFVq1YpKCjIbKtdu7YmTpyorl27urEyAABwJo50AwDggex2u/Ly8oq1Hz58WNWrV3dDRQAAoCSEbgAAPFC/fv30z3/+U//5z39kGIYMw9CqVav04IMP6uabb3Z3eQAA4P8jdAMA4IGmTZumiIgIdenSRTVq1FCNGjXUtWtXNW7cWC+99JK7ywMAAP8fv+kGAMAD1axZUx999JG2bNmijRs3yjAMNW/eXI0bN3Z3aQAA4Awc6QYAwIM1btxY/fv3180333zRgTs5OVk2m01xcXFmm2EYcjqdCg0Nlbe3t7p3766srKyLrBoAgMqD0A0AgAcaNGiQJk6cWKz9hRde0G233Vbm5WVmZur1119Xq1atXNonTZqkKVOmaPr06crMzJTD4VB0dHSJF3EDAADFEboBAPBAGRkZ6tu3b7H2m266Sd9++22ZlnX48GHdeeedeuONN1SrVi2z3TAMpaSkaOzYsRo4cKBatGih1NRU5efna968eRe9DQAAVAaEbgAAPNC5bg3m5eWl3NzcMi1rxIgR6tu3r2688UaX9m3btik7O1sxMTFmm91uV7du3bRixYoLKxwAgEqG0A0AgAdq0aKF3n///WLt8+fPV/PmzUu9nPnz52vt2rVKTk4u1pednS1JCg4OdmkPDg42+0pSUFCg3NxclwcAAJUVVy8HAMADPfXUU7r11lv1v//9TzfccIMk6auvvtJ7772nBQsWlGoZu3bt0iOPPKK0tDTVqFHjnONsNpvLtGEYxdrOlJycrHHjxpWqBgAALncc6QYAwAPdfPPNWrJkibZs2aLhw4frscce02+//aYvv/xSAwYMKNUy1qxZo5ycHLVr107VqlVTtWrVlJGRoWnTpqlatWrmEe6zj2rn5OQUO/p9psTERB06dMh87Nq164K3EwAAT8eRbgAAPFTfvn1LvJhaafXs2VMbNmxwabv33nvVrFkzjR49Wo0aNZLD4VB6erratGkjSTp+/LgyMjL0/PPPn3O5drtddrv9gusCAOByQugGAMBDHTx4UB9++KG2bt2qxx9/XEFBQVq7dq2Cg4N1xRVX/OX8/v7+atGihUubr6+vateubbbHxcUpKSlJkZGRioyMVFJSknx8fDR48GBLtgkAgMsNoRsAAA+0fv163XjjjQoMDNT27dt1//33KygoSIsXL9aOHTs0d+7ccllPQkKCjh49quHDh+vAgQPq1KmT0tLS5O/vXy7LBwDgckfoBgDAA8XHxys2NlaTJk1yCcC9e/e+qKPQ33zzjcu0zWaT0+mU0+m84GUCAFCZcSE1AAA8UGZmpoYNG1as/Yorrjjv7bwAAMClRegGAMAD1ahRo8T7X2/atEl169Z1Q0UAAKAkhG4AADzQLbfcovHjx6uwsFDS6dPAd+7cqTFjxujWW291c3UAAKAIoRsAAA/04osv6o8//lC9evV09OhRdevWTREREfLz89Nzzz3n7vIAAMD/x4XUAADwQAEBAVq+fLm+/vprrV27VqdOnVK7du3Us2dPd5cGAADOwJFuAAA8yH/+8x99/vnn5vQNN9ygunXr6tVXX9Udd9yhf/7znyooKHBjhQAA4EyEbgAAPIjT6dT69evN6Q0bNuiBBx5QdHS0xowZo08++UTJyclurBAAAJyJ0A0AgAdZt26dyynk8+fPV8eOHfXGG28oPj5e06ZN0wcffODGCgEAwJkI3QAAeJADBw4oODjYnM7IyNBNN91kTnfo0EG7du1yR2kAAKAEhG4AADxIcHCwtm3bJkk6fvy41q5dqy5dupj9eXl58vLycld5AADgLIRuAAA8yE033aQxY8bou+++U2Jionx8fHTdddeZ/evXr1dERIQbKwQAAGfilmEAAHiQCRMmaODAgerWrZv8/PyUmpqq6tWrm/1vvfWWYmJi3FghAAA4E6EbAAAPUrduXX333Xc6dOiQ/Pz8VLVqVZf+BQsWyM/Pz03VAQCAsxG6AQDwQIGBgSW2BwUFXeJKAADA+fCbbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCIXFbqTk5Nls9kUFxdnthmGIafTqdDQUHl7e6t79+7Kysq62DoBAAAAAPA4Fxy6MzMz9frrr6tVq1Yu7ZMmTdKUKVM0ffp0ZWZmyuFwKDo6Wnl5eRddLAAAAAAAnuSCQvfhw4d155136o033lCtWrXMdsMwlJKSorFjx2rgwIFq0aKFUlNTlZ+fr3nz5pVb0QAAAAAAeIJqFzLTiBE
j1LdvX914442aMGGC2b5t2zZlZ2crJibGbLPb7erWrZtWrFihYcOGFVtWQUGBCgoKzOnc3FxJUmFhoQoLCy+kPLhJ0evF6wZYj/ebZ+L1AgCg8ilz6J4/f77Wrl2rzMzMYn3Z2dmSpODgYJf24OBg7dixo8TlJScna9y4ccXa09LS5OPjU9byUAGkp6e7uwSg0uD95lny8/PdXQIAALjEyhS6d+3apUceeURpaWmqUaPGOcfZbDaXacMwirUVSUxMVHx8vDmdm5ursLAwxcTEKCAgoCzlwc0KCwuVnp6u6OhoeXl5ubsc4LLG+80zFZ3NBQAAKo8yhe41a9YoJydH7dq1M9tOnjypb7/9VtOnT9emTZsknT7iHRISYo7JyckpdvS7iN1ul91uL9bu5eXFB0kPxWsHXDq83zwLrxUAAJVPmS6k1rNnT23YsEHr1q0zH+3bt9edd96pdevWqVGjRnI4HC6nOx4/flwZGRmKiooq9+IBAAAAAKjIynSk29/fXy1atHBp8/X1Ve3atc32uLg4JSUlKTIyUpGRkUpKSpKPj48GDx5cflUDAAAAAOABLujq5eeTkJCgo0ePavjw4Tpw4IA6deqktLQ0+fv7l/eqAAAAAACo0C46dH/zzTcu0zabTU6nU06n82IXDQAAAACARyvTb7oBAAAAAEDpEboBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAgEpqxowZatWqlQICAhQQEKAuXbro888/N/sNw5DT6VRoaKi8vb3VvXt3ZWVlubFiAAA8D6EbAIBK6sorr9TEiRO1evVqrV69WjfccINuueUWM1hPmjRJU6ZM0fTp05WZmSmHw6Ho6Gjl5eW5uXIAADwHoRsAgEqqf//+6tOnj5o0aaImTZroueeek5+fn1atWiXDMJSSkqKxY8dq4MCBatGihVJTU5Wfn6958+a5u3QAADwGoRsAAOjkyZOaP3++jhw5oi5dumjbtm3Kzs5WTEyMOcZut6tbt25asWLFeZdVUFCg3NxclwcAAJUVoRsAgEpsw4YN8vPzk91u14MPPqjFixerefPmys7OliQFBwe7jA8ODjb7ziU5OVmBgYHmIywszLL6AQCo6AjdAABUYk2bNtW6deu0atUqPfTQQxoyZIh+/vlns99ms7mMNwyjWNvZEhMTdejQIfOxa9cuS2oHAMATVHN3AQAAwH2qV6+uxo0bS5Lat2+vzMxMvfTSSxo9erQkKTs7WyEhIeb4nJycYke/z2a322W3260rGgAAD8KRbgAAYDIMQwUFBQoPD5fD4VB6errZd/z4cWVkZCgqKsqNFQIA4Fk40g0AQCX15JNPqnfv3goLC1NeXp7mz5+vb775RkuXLpXNZlNcXJySkpIUGRmpyMhIJSUlycfHR4MHD3Z36QAAeAxCNwAAldTevXt19913a8+ePQoMDFSrVq20dOlSRUdHS5ISEhJ09OhRDR8+XAcOHFCnTp2UlpYmf39/N1cOAIDnIHQDAFBJzZo167z9NptNTqdTTqfz0hQEAMBliN90AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYJEyhe4ZM2aoVatWCggIUEBAgLp06aLPP//c7DcMQ06nU6GhofL29lb37t2VlZVV7kUDAAAAAOAJyhS6r7zySk2cOFGrV6/W6tWrdcMNN+iWW24xg/WkSZM0ZcoUTZ8+XZmZmXI4HIqOjlZeXp4lxQMAAAAAUJGVKXT3799fffr0UZMmTdSkSRM999xz8vPz06pVq2QYhlJSUjR27FgNHDhQLVq0UGpqqvLz8zVv3jyr6gcAAAAAoMKqdqEznjx5UgsWLNCRI0fUpUsXbdu2TdnZ2YqJiTHH2O12devWTStWrNCwYcNKXE5BQYEKCgrM6dzcXElSYWGhCgsLL7Q8uEHR68XrBliP95tn4vUCAKDyKXPo3rBhg7p06aJjx47Jz89PixcvVvPmzbVixQpJUnBwsMv44OBg7dix45zLS05O1rhx44q1p6WlycfHp6zloQJIT093dwlApcH7zbPk5+e7uwQAAHCJlTl0N23aVOvWrdPBgwe1cOFCDRkyRBkZGWa/zWZzGW8YRrG2MyUmJio+Pt6czs3NVVhYmGJiYhQQEFDW8uBGhYWFSk9PV3R0tLy8vNxdDnBZ4/3mmYrO5gIAAJVHmUN39erV1bhxY0lS+/btlZmZqZdeekmjR4+WJGVnZyskJMQcn5OTU+zo95nsdrvsdnuxdi8vLz5IeiheO+DS4f3mWXitAACofC76Pt2GYaigoEDh4eFyOBwupzoeP35cGRkZioqKutjVAAAAAADgccp0pPvJJ59U7969FRYWpry8PM2fP1/ffPONli5dKpvNpri4OCUlJSkyMlKRkZFKSkqSj4+PBg8ebFX9AAAAAABUWGU60r13717dfffdatq0qXr27Kn//Oc/Wrp0qaKjoyVJCQkJiouL0/Dhw9W+fXvt3r1baWlp8vf3t6R4AABw4ZKTk9WhQwf5+/urXr16GjBggDZt2uQyxjAMOZ1OhYaGytvbW927d1dWVpabKgYAwPOU6Uj3rFmzzttvs9nkdDrldDovpiYAAHAJZGRkaMSIEerQoYNOnDihsWPHKiYmRj///LN8fX0lSZMmTdKUKVM0Z84cNWnSRBMmTFB0dLQ2bdrEl+oAAJTCBd+nGwAAeLalS5e6TM+ePVv16tXTmjVrdP3118swDKWkpGjs2LEaOHCgJCk1NVXBwcGaN2+ehg0b5o6yAQDwKBd9ITUAAHB5OHTokCQpKChIkrRt2zZlZ2crJibGHGO329WtWzetWLHCLTUCAOBpONINAABkGIbi4+N17bXXqkWLFpJO3wZUUrFbfwYHB2vHjh3nXFZBQYEKCgrMae5PDgCozDjSDQAANHLkSK1fv17vvfdesT6bzeYybRhGsbYzJScnKzAw0HyEhYWVe70AAHgKQjcAAJXcww8/rI8//ljLli3TlVdeabY7HA5J/3fEu0hOTk6xo99nSkxM1KFDh8zHrl27rCkcAAAPQOgGAKCSMgxDI0eO1KJFi/T1118rPDzcpT88PFwOh0Pp6elm2/Hjx5WRkaGoqKhzLtdutysgIMDlAQBAZcVvugEAqKRGjBihefPm6aOPPpK/v7
95RDswMFDe3t6y2WyKi4tTUlKSIiMjFRkZqaSkJPn4+Gjw4MFurh4AAM9A6AYAoJKaMWOGJKl79+4u7bNnz1ZsbKwkKSEhQUePHtXw4cN14MABderUSWlpadyjGwCAUiJ0AwBQSRmG8ZdjbDabnE6nnE6n9QUBAHAZ4jfdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARaq5uwAAAABP1jPpiLtLwAX46klfd5cAoJLgSDcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYJEyhe7k5GR16NBB/v7+qlevngYMGKBNmza5jDEMQ06nU6GhofL29lb37t2VlZVVrkUDAIDy8e2336p///4KDQ2VzWbTkiVLXPrZrwMAcHHKFLozMjI0YsQIrVq1Sunp6Tpx4oRiYmJ05MgRc8ykSZM0ZcoUTZ8+XZmZmXI4HIqOjlZeXl65Fw8AAC7OkSNHdM0112j69Okl9rNfBwDg4lQry+ClS5e6TM+ePVv16tXTmjVrdP3118swDKWkpGjs2LEaOHCgJCk1NVXBwcGaN2+ehg0bVn6VAwCAi9a7d2/17t27xD726wAAXLwyhe6zHTp0SJIUFBQkSdq2bZuys7MVExNjjrHb7erWrZtWrFhR4s65oKBABQUF5nRubq4kqbCwUIWFhRdTHi6xoteL1w2wHu83z+Rpr9eF7NcBAICrCw7dhmEoPj5e1157rVq0aCFJys7OliQFBwe7jA0ODtaOHTtKXE5ycrLGjRtXrD0tLU0+Pj4XWh7cKD093d0lAJUG7zfPkp+f7+4SyuRC9uvSub9QBwCgMrrg0D1y5EitX79ey5cvL9Zns9lcpg3DKNZWJDExUfHx8eZ0bm6uwsLCFBMTo4CAgAstD25QWFio9PR0RUdHy8vLy93lAJc13m+eyVPDZ1n269K5v1AHAKAyuqDQ/fDDD+vjjz/Wt99+qyuvvNJsdzgckk5/Mx4SEmK25+TkFPuWvIjdbpfdbi/W7uXlxQdJD8VrB1w6vN88i6e9VheyX5fO/YU6AACVUZmuXm4YhkaOHKlFixbp66+/Vnh4uEt/eHi4HA6Hy+mOx48fV0ZGhqKiosqnYgAAcElc6H7dbrcrICDA5QEAQGVVpiPdI0aM0Lx58/TRRx/J39/f/K1XYGCgvL29ZbPZFBcXp6SkJEVGRioyMlJJSUny8fHR4MGDLdkAAABw4Q4fPqwtW7aY09u2bdO6desUFBSk+vXrs18HAOAilSl0z5gxQ5LUvXt3l/bZs2crNjZWkpSQkKCjR49q+PDhOnDggDp16qS0tDT5+/uXS8EAAKD8rF69Wj169DCni04LHzJkiObMmcN+HQCAi1Sm0G0Yxl+OsdlscjqdcjqdF1oTAAC4RLp3737e/Tv7dQAALk6ZftMNAAAAAABKj9ANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFikmrsLQMWw/8hxLVz3sw6f2F9i/5HDudq8Yc15l2GcMpS9d68WZmXKVsV2znGRLdvJ1y+gxL6mda9Qn+ZNS184AAAAAFRghG5IktKysvXCyjmy1/3q3INCSrGgK6Q9fzFk7Z9LpD9L7ivI7KkmdSaocT2/UqwMAAAAACo2QjckSTFXO5RXGKvDJ24usb8sR7odwcEXfqS7wxUEbgAAAACXDUI3JElBvtX1QNfW5x/Ud8B5uwsLC/XZZ5+pT58+8vLyKrfaAAAAAMBTcSE1AAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAPCXXn31VYWHh6tGjRpq166dvvvuO3eXBACARyB0AwCA83r//fcVFxensWPH6scff9R1112n3r17a+fOne4uDQCACo/QDQAAzmvKlCm67777dP/99+uqq65SSkqKwsLCNGPGDHeXBgBAhVfN3QWczTAMSVJubq6bK0FZFRYWKj8/X7m5ufLy8nJ3OcBljfebZyratxXt6zzB8ePHtWbNGo0ZM8alPSYmRitWrChxnoKCAhUUFJjThw4dknT57ttPHDvi7hJwAXJzT7q7BFyAk/mF7i4BF+By/ftf2v16hQvdeXl5kqSwsDA3VwIAgDXy8vIUGBjo7jJK5c8//9TJkycVHBzs0h4cHKzs7OwS50lOTta4ceOKtbNvR0US+Ky7KwAqj8ChnrHPu1B/tV+vcKE7NDRUu3btkr+/v2w2m7vLQRnk5uYqLCxMu3btUkBAgLvLAS5rvN88k2EYysvLU2hoqLtLKbOz98mGYZxzP52YmKj4+Hhz+tSpU9q/f79q167Nvt2D8HcGuHR4v3mm0u7XK1zorlKliq688kp3l4GLEBAQwB8L4BLh/eZ5POUId5E6deqoatWqxY5q5+TkFDv6XcRut8tut7u01axZ06oSYTH+zgCXDu83z1Oa/ToXUgMAAOdUvXp1tWvXTunp6S7t6enpioqKclNVAAB4jgp3pBsAAFQs8fHxuvvuu9W+fXt16dJFr7/+unbu3KkHH3zQ3aUBAFDhEbpRbux2u5555plipxQCKH+833Ap/eMf/9C+ffs0fvx47dmzRy1atNBnn32mBg0auLs0WIi/M8Clw/vt8mYzPOm+JQAAAAAAeBB+0w0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAHCJde/eXXFxcZKkhg0bKiUlxa31AAAAwDqEblwyNptNS5YscXcZAAAAAHDJELoBAAAgSWrUqJH27dtXrP3gwYNq1KiRGyoCAM9H6Eap5OXl6c4775Svr
69CQkI0derUYqfIPvvssxo8eLD8/PwUGhqql19+2Zy/YcOGkqS//e1vstls5jQAV1OmTFHLli3l6+ursLAwDR8+XIcPHzb758yZo5o1a+rTTz9V06ZN5ePjo0GDBunIkSNKTU1Vw4YNVatWLT388MM6efKkOd8777yj9u3by9/fXw6HQ4MHD1ZOTo47NhFABbZ9+3aXvx1FCgoKtHv3bjdUBFz+8vPz9csvv2j9+vUuD1w+qrm7AHiG+Ph4ff/99/r4448VHBysp59+WmvXrlXr1q3NMS+88IKefPJJOZ1OffHFF3r00UfVrFkzRUdHKzMzU/Xq1dPs2bN10003qWrVqu7bGKACq1KliqZNm6aGDRtq27ZtGj58uBISEvTqq6+aY/Lz8zVt2jTNnz9feXl5GjhwoAYOHKiaNWvqs88+09atW3Xrrbfq2muv1T/+8Q9J0vHjx/Xss8+qadOmysnJ0aOPPqrY2Fh99tln7tpUABXIxx9/bP77iy++UGBgoDl98uRJffXVV3xhDpSzP/74Q/fee68+//zzEvtL+gIMnonQjb+Ul5en1NRUzZs3Tz179pQkzZ49W6GhoS7junbtqjFjxkiSmjRpou+//15Tp05VdHS06tatK0mqWbOmHA7Hpd0AwIMUnT0iSeHh4Xr22Wf10EMPuYTuwsJCzZgxQxEREZKkQYMG6e2339bevXvl5+en5s2bq0ePHlq2bJkZuocOHWrO36hRI02bNk0dO3bU4cOH5efnd2k2DkCFNWDAAEmnr78yZMgQlz4vLy81bNhQkydPdkNlwOUrLi5OBw4c0KpVq9SjRw8tXrxYe/fu1YQJE3i/XWYI3fhLW7duVWFhoTp27Gi2BQYGqmnTpi7junTpUmyaqzIDZbNs2TIlJSXp559/Vm5urk6cOKFjx47pyJEj8vX1lST5+PiYgVuSgoOD1bBhQ5fwHBwc7HL6+I8//iin06l169Zp//79OnXqlCRp586dat68+SXaOgAVVdHfhPDwcGVmZqpOnTpurgi4/H399df66KOP1KFDB1WpUkUNGjRQdHS0AgIClJycrL59+7q7RJQTftONv2QYhqTT336X1H4+Z88D4Nx27NihPn36qEWLFlq4cKHWrFmjV155RdLpo9tFvLy8XOaz2WwlthV9iD5y5IhiYmLk5+end955R5mZmVq8eLGk06edA0CRbdu2EbiBS+TIkSOqV6+eJCkoKEh//PGHJKlly5Zau3atO0tDOSN04y9FRETIy8tLP/zwg9mWm5urzZs3u4xbtWpVselmzZqZ015eXvw2BTiP1atX68SJE5o8ebI6d+6sJk2a6Pfff7/o5f7yyy/6888/NXHiRF133XVq1qwZF1EDcE5fffWV+vXrp4iICDVu3Fj9+vXTl19+6e6ygMtO06ZNtWnTJklS69at9dprr2n37t2aOXOmQkJC3FwdyhOhG3/J399fQ4YM0RNPPKFly5YpKytLQ4cOVZUqVVyOZH///feaNGmSfv31V73yyitasGCBHnnkEbO/YcOG+uqrr5Sdna0DBw64Y1OACi0iIkInTpzQyy+/rK1bt+rtt9/WzJkzL3q59evXV/Xq1c3lfvzxx3r22WfLoWIAl5vp06frpptukr+/vx555BGNGjVKAQEB6tOnj6ZPn+7u8oDLSlxcnPbs2SNJeuaZZ7R06VLVr19f06ZNU1JSkpurQ3kidKNUpkyZoi5duqhfv3668cYb1bVrV1111VWqUaOGOeaxxx7TmjVr1KZNGz377LOaPHmyevXqZfZPnjxZ6enpCgsLU5s2bdyxGUCF1rp1a02ZMkXPP/+8WrRooXfffVfJyckXvdy6detqzpw5WrBggZo3b66JEyfqxRdfLIeKAVxukpOTNXXqVL333nsaNWqURo0apXnz5mnq1KmEAKCc3XnnnYqNjZUktWnTRtu3b1dmZqZ27dplXggVlwebUZof5gJnOXLkiK644gpNnjxZ9913nxo2bKi4uDiXKy8DAADP4u/vrx9//FGNGzd2ad+8ebPatGmjw4cPu6kyAPBcXL0cpfLjjz/ql19+UceOHXXo0CGNHz9eknTLLbe4uTIAAFBebr75Zi1evFhPPPGES/tHH32k/v37u6kq4PJkGIY+/PBDLVu2TDk5OeYFUIssWrTITZWhvBG6UWovvviiNm3apOrVq6tdu3b67rvvuMIpAACXkauuukrPPfecvvnmG/NWoKtWrdL333+vxx57TNOmTTPHjho1yl1lApeFRx55RK+//rp69Oih4OBg7vpzGeP0cgAAAEg6fZ/u0rDZbNq6davF1QCXt6CgIL3zzjvq06ePu0uBxTjSDQAAAEmn79MN4NIIDAxUo0aN3F0GLgGOdAMAAECSFB8fX2K7zWZTjRo1FBkZqZtvvllBQUGXuDLg8pOamqqlS5fqrbfekre3t7vLgYUI3QAAAJAk9ejRQ2vXrtXJkyfVtGlTGYahzZs3q2rVqmrWrJk2bdokm82m7777TldffbW7ywU8Wn5+vgYOHKjvv/9eDRs2lJeXl0v/2rVr3VQZyhunlwMAAEDS6buSBAUFafbs2QoICJAk5ebm6r777tO1116rBx54QIMHD1Z8fLy++OILN1cLeLbY2FitWbNGd911FxdSu8xxpBsAAACSpCuuuELp6elq3ry5S3tWVpZiYmK0e/durV27VjExMfrzzz/dVCVwefD19dUXX3yha6+91t2lwGJV3F0AgIrnm2++kc1m08GDB0s9T8OGDZWSkmJZTQAA6x06dEg5OTnF2v/44w/l5uZKkmrWrKnjx49f6tKAy05YWJh5Rgkub4RuwAPFxsbKZrPpwQcfLNY3fPhw2Ww2xcbGXvrCAAAe7ZZbbtHQoUO1ePFi/fbbb9q9e7cWL16s++67TwMGDJAk/fDDD2rSpIl7CwUuA5MnT1ZCQoK2b9/u7lJgMX7TDXiosLAwzZ8/X1OnTjWveHns2DG99957ql+/vpurAwB4otdee02PPvqobr/9dp04cUKSVK1aNQ0ZMkRTp06VJDVr1kxvvvmmO8sELgt33XWX8vPzFRERIR8fn2IXUtu/f7+bKkN5I3QDHqpt27baunWrFi1apDvvvFOStGjRIoWFhbnc87GgoEBPPPGE5s+fr9zcXLVv315Tp05Vhw4dzDGfffaZ4uLitGvXLnXu3FlDhgwptr4VK1ZozJgxyszMVJ06dfS3v/1NycnJ8vX1tX5jAQCXhJ+fn9544w1NnTpVW7dulWEYioiIkJ+fnzmmdevW7isQuIzws7zKg9ANeLB7771Xs2fPNkP3W2+9paFDh+qbb74xxyQkJGjhwoVKTU1VgwYNNGnSJPXq1UtbtmxRUFCQdu3apYEDB+rBBx/UQw89pNWrV+uxxx5zWc+GDRvUq1cvPfvss5o1a5b++OMPjRw5UiNHjtTs2bMv5SYDAC4BPz8/tWrVyt1lAJe1kg5y4PLEb7oBD3b33Xdr+fLl2r59u3bs2KHvv/9ed911l9l/5MgRzZgxQy+88IJ69+6t5s2b64033pC3t7dm
zZolSZoxY4YaNWqkqVOnqmnTprrzzjuL/R78hRde0ODBgxUXF6fIyEhFRUVp2rRpmjt3ro4dO3YpNxkAAOCyc/ToUeXm5ro8cPngSDfgwerUqaO+ffsqNTVVhmGob9++qlOnjtn/v//9T4WFheratavZ5uXlpY4dO2rjxo2SpI0bN6pz584u94bs0qWLy3rWrFmjLVu26N133zXbDMPQqVOntG3bNl111VVWbSIAAMBl6ciRIxo9erQ++OAD7du3r1j/yZMn3VAVrEDoBjzc0KFDNXLkSEnSK6+84tJnGIYkuQTqovaitqIx53Pq1CkNGzZMo0aNKtbHRdsAAADKLiEhQcuWLdOrr76qe+65R6+88op2796t1157TRMnTnR3eShHnF4OeLibbrpJx48f1/Hjx9WrVy+XvsaNG6t69epavny52VZYWKjVq1ebR6ebN2+uVatWucx39nTbtm2VlZWlxo0bF3tUr17doi0DAAC4fH3yySd69dVXNWjQIFWrVk3XXXed/vWvfykpKcnl7EJ4PkI34OGqVq2qjRs3auPGjapatapLn6+vrx566CE98cQTWrp0qX7++Wc98MADys/P13333SdJevDBB/W///1P8fHx2rRpk+bNm6c5c+a4LGf06NFauXKlRowYoXXr1mnz5s36+OOP9fDDD1+qzQQAALis7N+/X+Hh4ZKkgIAA8xZh1157rb799lt3loZyRugGLgMBAQEKCAgosW/ixIm69dZbdffdd6tt27basmWLvvjiC9WqVUvS6dPDFy5cqE8++UTXXHONZs6cqaSkJJdltGrVShkZGdq8ebOuu+46tWnTRk899ZRCQkIs3zYAAIDLUaNGjbR9+3ZJp888/OCDDySdPgJes2ZN9xWGcmczSvODTgAAAABAuZk6daqqVq2qUaNGadmyZerbt69OnjypEydOaMqUKXrkkUfcXSLKCaEbAAAAANxs586dWr16tSIiInTNNde4uxyUI0I3AAAAAAAW4ZZhAAAAAHAJTJs2rdRjS7pVKzwTR7oBAAAA4BIoulr5X7HZbNq6davF1eBSIXQDAAAAAGARTi8HAAAAgEsgPj6+VONsNpsmT55scTW4VAjdAAAAAHAJ/Pjjj6UaZ7PZLK4ElxKnlwMAAAAAYJEq7i4AAAAAAIDLFaEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAIDL2DfffCObzaaDBw+Wep6GDRsqJSXFspqAyoTQDQAAALhRbGysbDabHnzwwWJ9w4cPl81mU2xs7KUvDEC5IHQDAAAAbhYWFqb58+fr6NGjZtuxY8f03nvvqX79+m6sDMDFInQDAAAAbta2bVvVr19fixYtMtsWLVqksLAwtWnTxmwrKCjQqFGjVK9ePdWoUUPXXnutMjMzXZb12WefqUmTJvL29laPHj20ffv2YutbsWKFrr/+enl7eyssLEyjRo3SkSNHLNs+oDIjdAMAAAAVwL333qvZs2eb02+99ZaGDh3qMiYhIUELFy5Uamqq1q5dq8aNG6tXr17av3+/JGnXrl0aOHCg+vTpo3Xr1un+++/XmDFjXJaxYcMG9erVSwMHDtT69ev1/vvva/ny5Ro5cqT1GwlUQoRuAAAAoAK4++67tXz5cm3fvl07duzQ999/r7vuusvsP3LkiGbMmKEXXnhBvXv3VvPmzfXGG2/I29tbs2bNkiTNmDFDjRo10tSpU9W0aVPdeeedxX4P/sILL2jw4MGKi4tTZGSkoqKiNG3aNM2dO1fHjh27lJsMVArV3F0AAAAAAKlOnTrq27evUlNTZRiG+vbtqzp16pj9//vf/1RYWKiuXbuabV5eXurYsaM2btwoSdq4caM6d+4sm81mjunSpYvLetasWaMtW7bo3XffNdsMw9CpU6e0bds2XXXVVVZtIlApEboBAACACmLo0KHmad6vvPKKS59hGJLkEqiL2ovaisacz6lTpzRs2DCNGjWqWB8XbQPKH6eXAwAAABXETTfdpOPHj+v48ePq1auXS1/jxo1VvXp1LV++3GwrLCzU6tWrzaPTzZs316pVq1zmO3u6bdu2ysrKUuPGjYs9qlevbtGWAZUXoRsAAACoIKpWraqNGzdq48aNqlq1qkufr6+vHnroIT3xxBNaunSpfv75Zz3wwAPKz8/XfffdJ0l68MEH9b///U/x8fHatGmT5s2bpzlz5rgsZ/To0Vq5cqVGjBihdevWafPmzfr444/18MMPX6rNBCoVQjcAAABQgQQEBCggIKDEvokTJ+rWW2/V3XffrbZt22rLli364osvVKtWLUmnTw9fuHChPvnkE11zzTWaOXOmkpKSXJbRqlUrZWRkaPPmzbruuuvUpk0bPfXUUwoJCbF824DKyGaU5ocfAAAAAACgzDjSDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWOT/AQCTJnuW6jTEAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Show performance metrics\n", + "tutor.show_performance_metrics()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "41f5122e-96e9-4fda-9b4f-e8cf4caff552", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
History saved to my_tutor_session.json\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;32mHistory saved to my_tutor_session.json\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Save history to a file\n", + "tutor.save_history(\"my_tutor_session.json\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "4aa6afbf-1cc1-4ed1-a65f-14ee02ce278f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       " New Question:                                                                                                   \n",
+       " Explain how to implement a binary search algorithm in Python.                                                   \n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[32m╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\u001b[0m\n", + "\u001b[32m│\u001b[0m \u001b[1mNew Question:\u001b[0m \u001b[32m│\u001b[0m\n", + "\u001b[32m│\u001b[0m Explain how to implement a binary search algorithm in Python. \u001b[32m│\u001b[0m\n", + "\u001b[32m╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       "│ Getting response from gpt-4o-mini...                                                                            │\n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n", + "│ \u001b[1;34mGetting response from gpt-4o-mini...\u001b[0m │\n", + "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Binary search is an efficient algorithm for finding a target value within a sorted list. It works by repeatedly dividing the search interval in half. If the target value is less than the element in the middle of the interval, the search continues on the lower half; otherwise, it continues on the upper half. This process is continued until the target value is found or the search interval is empty.\n", + "\n", + "Here's a detailed explanation and implementation of the binary search algorithm in Python:\n", + "\n", + "### Step-by-Step Implementation\n", + "\n", + "1. **Prerequisites**:\n", + " - Ensure the input list is sorted. Binary search can only be performed on a sorted list.\n", + " \n", + "2. **Set Initial Variables**:\n", + " - Define two pointers, `low` and `high`, which represent the starting and ending indices of the search range in the list.\n", + "\n", + "3. **Calculate the Middle Index**:\n", + " - Use the formula `mid = (low + high) // 2` to find the middle index.\n", + "\n", + "4. **Comparison**:\n", + " - Compare the middle element with the target:\n", + " - If the middle element is equal to the target, return the index of the middle element.\n", + " - If the target is less than the middle element, narrow the search to the left half by setting `high = mid - 1`.\n", + " - If the target is greater than the middle element, narrow the search to the right half by setting `low = mid + 1`.\n", + "\n", + "5. **Loop Until the Target is Found or the Interval is Empty**:\n", + " - Repeat the above steps until the `low` pointer exceeds the `high` pointer. If the target is not found, return a value indicating that the target is not present (commonly -1).\n", + "\n", + "### Implementation in Python\n", + "\n", + "Here’s a complete Python implementation of the binary search algorithm:\n", + "\n", + "\n", + "def binary_search(arr, target):\n", + " low = 0\n", + " high = len(arr) - 1\n", + "\n", + " while low <= high:\n", + " # Find the middle index\n", + " mid = (low + high) // 2\n", + " \n", + " # Check if the target is present at mid\n", + " if arr[mid] == target:\n", + " return mid # Target found, return the index\n", + " \n", + " # If the target is smaller than the mid element,\n", + " # it can only be present in the left subarray\n", + " elif arr[mid] > target:\n", + " high = mid - 1\n", + " \n", + " # If the target is larger than the mid element,\n", + " # it can only be present in the right subarray\n", + " else:\n", + " low = mid + 1\n", + "\n", + " # Target was not found\n", + " return -1\n", + "\n", + "\n", + "### Example Usage\n", + "\n", + "\n", + "# Example sorted list\n", + "arr = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]\n", + "target = 7\n", + "\n", + "# Perform binary search\n", + "result = binary_search(arr, target)\n", + "\n", + "if result != -1:\n", + " print(f'Target {target} found at index {result}.')\n", + "else:\n", + " print(f'Target {target} not found in the list.')\n", + "\n", + "\n", + "### Key Points\n", + "\n", + "1. 
**Time Complexity**: The time complexity of binary search is O(log n), where n is the number of elements in the array. This is significantly more efficient than a linear search, which has a time complexity of O(n).\n", + " \n", + "2. **Space Complexity**: The space complexity of the binary search algorithm is O(1) for the iterative version, as it requires a fixed amount of space for variables.\n", + "\n", + "3. **Iterative vs Recursive**: The above implementation is iterative, which is generally preferred for binary search due to its efficiency and avoidance of recursion limits. However, a recursive implementation can also be done:\n", + "\n", + "### Recursive Implementation\n", + "\n", + "\n", + "def binary_search_recursive(arr, target, low, high):\n", + " if low <= high:\n", + " mid = (low + high) // 2\n", + " \n", + " if arr[mid] == target:\n", + " return mid\n", + " elif arr[mid] > target:\n", + " return binary_search_recursive(arr, target, low, mid - 1)\n", + " else:\n", + " return binary_search_recursive(arr, target, mid + 1, high)\n", + " \n", + " return -1\n", + "\n", + "\n", + "### Conclusion\n", + "\n", + "Binary search is a fundamental searching technique that exploits the properties of sorted arrays. Its efficiency makes it a preferred method for searching when working with large datasets. Understanding its underlying algorithm and being able to implement it in Python is a valuable skill in software engineering and data science." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
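For reference, the same lookup can be done with Python's standard library: `bisect.bisect_left` finds the leftmost insertion point in a sorted sequence, and one comparison turns that into a search. This is a small sketch alongside the responses above, not something either model produced.

```python
from bisect import bisect_left

def binary_search_bisect(arr, target):
    """Return the index of target in the sorted list arr, or -1 if absent."""
    i = bisect_left(arr, target)              # leftmost position where target could be inserted
    if i < len(arr) and arr[i] == target:     # confirm the slot actually holds the target
        return i
    return -1

arr = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
print(binary_search_bisect(arr, 7))   # 3
print(binary_search_bisect(arr, 8))   # -1
```

Using `bisect_left` keeps the O(log n) behaviour while avoiding a hand-rolled loop.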
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n",
+       "│ Getting response from llama3.2...                                                                               │\n",
+       "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "
\n" + ], + "text/plain": [ + "╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮\n", + "│ \u001b[1;32mGetting response from llama3.2...\u001b[0m │\n", + "╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "## llama3.2 Response\n", + "**Implementing Binary Search Algorithm in Python**\n", + "=====================================================\n", + "\n", + "Binary search is an efficient algorithm for finding an item from a sorted list of items. It works by repeatedly dividing in half the portion of the list that could contain the item, until you've narrowed down the possible locations to just one.\n", + "\n", + "Here's a step-by-step guide on how to implement binary search in Python:\n", + "\n", + "**Step 1: Define the Search Function**\n", + "-------------------------------------\n", + "\n", + "```python\n", + "def binary_search(arr, target):\n", + " \"\"\"\n", + " Searches for an element in a sorted array using binary search algorithm.\n", + " \n", + " Parameters:\n", + " arr (list): The sorted list of elements.\n", + " target: The element to be searched.\n", + " \n", + " Returns:\n", + " int: The index of the target element if found; otherwise, -1.\n", + " \"\"\"\n", + "```\n", + "\n", + "**Step 2: Initialize Variables**\n", + "---------------------------------\n", + "\n", + "```python\n", + " low = 0 # Index of the first element in the list\n", + " high = len(arr) - 1 # Index of the last element in the list\n", + "```\n", + "\n", + "**Step 3: Loop Until Found or Not Found**\n", + "-----------------------------------------\n", + "\n", + "```python\n", + " while low <= high:\n", + " mid = (low + high) // 2 # Calculate the middle index\n", + " \n", + " if arr[mid] == target:\n", + " return mid # Target found, return its index\n", + " \n", + " elif arr[mid] < target:\n", + " low = mid + 1 # Search in the right half\n", + " \n", + " else:\n", + " high = mid - 1 # Search in the left half\n", + "```\n", + "\n", + "**Step 4: Handle Edge Cases**\n", + "---------------------------\n", + "\n", + "```python\n", + " if low > high:\n", + " return -1 # Target not found, return -1\n", + "```\n", + "\n", + "**Putting it all Together**\n", + "-----------------------------\n", + "\n", + "Here's the complete binary search implementation in Python:\n", + "\n", + "```python\n", + "def binary_search(arr, target):\n", + " \"\"\"\n", + " Searches for an element in a sorted array using binary search algorithm.\n", + " \n", + " Parameters:\n", + " arr (list): The sorted list of elements.\n", + " target: The element to be searched.\n", + " \n", + " Returns:\n", + " int: The index of the target element if found; otherwise, -1.\n", + " \"\"\"\n", + " low = 0\n", + " high = len(arr) - 1\n", + "\n", + " while low <= high:\n", + " mid = (low + high) // 2\n", + " \n", + " if arr[mid] == target:\n", + " return mid\n", + " elif arr[mid] < target:\n", + " low = mid + 1\n", + " else:\n", + " high = mid - 1\n", + " \n", + " return -1\n", + "\n", + "# Example usage\n", + "arr = [2, 4, 6, 8, 10]\n", + "target = 6\n", + "index = binary_search(arr, target)\n", + "if index != -1:\n", + " print(f\"Target {target} found at index {index}\")\n", + "else:\n", + " print(\"Target not found\")\n", + "```\n", + "\n", + "**Time Complexity**\n", + "------------------\n", + "\n", + "The time complexity of 
binary search is O(log n), where n is the length of the input array. This makes it much faster than linear search (O(n)) for large datasets.\n", + "\n", + "I hope this explanation helps! Let me know if you have any further questions or need additional clarification." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + "
\n", + "

gpt-4o-mini

\n", + "
Binary search is an efficient algorithm for finding a target value within a sorted list. It works by repeatedly dividing the search interval in half. If the target value is less than the element in the middle of the interval, the search continues on the lower half; otherwise, it continues on the upper half. This process is continued until the target value is found or the search interval is empty.\n", + "\n", + "Here's a detailed explanation and implementation of the binary search algorithm in Python:\n", + "\n", + "### Step-by-Step Implementation\n", + "\n", + "1. **Prerequisites**:\n", + " - Ensure the input list is sorted. Binary search can only be performed on a sorted list.\n", + " \n", + "2. **Set Initial Variables**:\n", + " - Define two pointers, `low` and `high`, which represent the starting and ending indices of the search range in the list.\n", + "\n", + "3. **Calculate the Middle Index**:\n", + " - Use the formula `mid = (low + high) // 2` to find the middle index.\n", + "\n", + "4. **Comparison**:\n", + " - Compare the middle element with the target:\n", + " - If the middle element is equal to the target, return the index of the middle element.\n", + " - If the target is less than the middle element, narrow the search to the left half by setting `high = mid - 1`.\n", + " - If the target is greater than the middle element, narrow the search to the right half by setting `low = mid + 1`.\n", + "\n", + "5. **Loop Until the Target is Found or the Interval is Empty**:\n", + " - Repeat the above steps until the `low` pointer exceeds the `high` pointer. If the target is not found, return a value indicating that the target is not present (commonly -1).\n", + "\n", + "### Implementation in Python\n", + "\n", + "Here’s a complete Python implementation of the binary search algorithm:\n", + "\n", + "```python\n", + "def binary_search(arr, target):\n", + " low = 0\n", + " high = len(arr) - 1\n", + "\n", + " while low <= high:\n", + " # Find the middle index\n", + " mid = (low + high) // 2\n", + " \n", + " # Check if the target is present at mid\n", + " if arr[mid] == target:\n", + " return mid # Target found, return the index\n", + " \n", + " # If the target is smaller than the mid element,\n", + " # it can only be present in the left subarray\n", + " elif arr[mid] > target:\n", + " high = mid - 1\n", + " \n", + " # If the target is larger than the mid element,\n", + " # it can only be present in the right subarray\n", + " else:\n", + " low = mid + 1\n", + "\n", + " # Target was not found\n", + " return -1\n", + "```\n", + "\n", + "### Example Usage\n", + "\n", + "```python\n", + "# Example sorted list\n", + "arr = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]\n", + "target = 7\n", + "\n", + "# Perform binary search\n", + "result = binary_search(arr, target)\n", + "\n", + "if result != -1:\n", + " print(f'Target {target} found at index {result}.')\n", + "else:\n", + " print(f'Target {target} not found in the list.')\n", + "```\n", + "\n", + "### Key Points\n", + "\n", + "1. **Time Complexity**: The time complexity of binary search is O(log n), where n is the number of elements in the array. This is significantly more efficient than a linear search, which has a time complexity of O(n).\n", + " \n", + "2. **Space Complexity**: The space complexity of the binary search algorithm is O(1) for the iterative version, as it requires a fixed amount of space for variables.\n", + "\n", + "3. 
**Iterative vs Recursive**: The above implementation is iterative, which is generally preferred for binary search due to its efficiency and avoidance of recursion limits. However, a recursive implementation can also be done:\n", + "\n", + "### Recursive Implementation\n", + "\n", + "```python\n", + "def binary_search_recursive(arr, target, low, high):\n", + " if low <= high:\n", + " mid = (low + high) // 2\n", + " \n", + " if arr[mid] == target:\n", + " return mid\n", + " elif arr[mid] > target:\n", + " return binary_search_recursive(arr, target, low, mid - 1)\n", + " else:\n", + " return binary_search_recursive(arr, target, mid + 1, high)\n", + " \n", + " return -1\n", + "```\n", + "\n", + "### Conclusion\n", + "\n", + "Binary search is a fundamental searching technique that exploits the properties of sorted arrays. Its efficiency makes it a preferred method for searching when working with large datasets. Understanding its underlying algorithm and being able to implement it in Python is a valuable skill in software engineering and data science.
\n", + "
\n", + "
\n", + "

llama3.2

\n", + "
**Implementing Binary Search Algorithm in Python**\n", + "=====================================================\n", + "\n", + "Binary search is an efficient algorithm for finding an item from a sorted list of items. It works by repeatedly dividing in half the portion of the list that could contain the item, until you've narrowed down the possible locations to just one.\n", + "\n", + "Here's a step-by-step guide on how to implement binary search in Python:\n", + "\n", + "**Step 1: Define the Search Function**\n", + "-------------------------------------\n", + "\n", + "```python\n", + "def binary_search(arr, target):\n", + " \"\"\"\n", + " Searches for an element in a sorted array using binary search algorithm.\n", + " \n", + " Parameters:\n", + " arr (list): The sorted list of elements.\n", + " target: The element to be searched.\n", + " \n", + " Returns:\n", + " int: The index of the target element if found; otherwise, -1.\n", + " \"\"\"\n", + "```\n", + "\n", + "**Step 2: Initialize Variables**\n", + "---------------------------------\n", + "\n", + "```python\n", + " low = 0 # Index of the first element in the list\n", + " high = len(arr) - 1 # Index of the last element in the list\n", + "```\n", + "\n", + "**Step 3: Loop Until Found or Not Found**\n", + "-----------------------------------------\n", + "\n", + "```python\n", + " while low <= high:\n", + " mid = (low + high) // 2 # Calculate the middle index\n", + " \n", + " if arr[mid] == target:\n", + " return mid # Target found, return its index\n", + " \n", + " elif arr[mid] < target:\n", + " low = mid + 1 # Search in the right half\n", + " \n", + " else:\n", + " high = mid - 1 # Search in the left half\n", + "```\n", + "\n", + "**Step 4: Handle Edge Cases**\n", + "---------------------------\n", + "\n", + "```python\n", + " if low > high:\n", + " return -1 # Target not found, return -1\n", + "```\n", + "\n", + "**Putting it all Together**\n", + "-----------------------------\n", + "\n", + "Here's the complete binary search implementation in Python:\n", + "\n", + "```python\n", + "def binary_search(arr, target):\n", + " \"\"\"\n", + " Searches for an element in a sorted array using binary search algorithm.\n", + " \n", + " Parameters:\n", + " arr (list): The sorted list of elements.\n", + " target: The element to be searched.\n", + " \n", + " Returns:\n", + " int: The index of the target element if found; otherwise, -1.\n", + " \"\"\"\n", + " low = 0\n", + " high = len(arr) - 1\n", + "\n", + " while low <= high:\n", + " mid = (low + high) // 2\n", + " \n", + " if arr[mid] == target:\n", + " return mid\n", + " elif arr[mid] < target:\n", + " low = mid + 1\n", + " else:\n", + " high = mid - 1\n", + " \n", + " return -1\n", + "\n", + "# Example usage\n", + "arr = [2, 4, 6, 8, 10]\n", + "target = 6\n", + "index = binary_search(arr, target)\n", + "if index != -1:\n", + " print(f\"Target {target} found at index {index}\")\n", + "else:\n", + " print(\"Target not found\")\n", + "```\n", + "\n", + "**Time Complexity**\n", + "------------------\n", + "\n", + "The time complexity of binary search is O(log n), where n is the length of the input array. This makes it much faster than linear search (O(n)) for large datasets.\n", + "\n", + "I hope this explanation helps! Let me know if you have any further questions or need additional clarification.
\n", + "
\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Define a new question\n", + "new_question = \"Explain how to implement a binary search algorithm in Python.\"\n", + "\n", + "console.print(Panel(f\"[bold]New Question:[/bold]\\n{new_question}\", border_style=\"green\"))\n", + "\n", + "# Get responses for the new question\n", + "new_responses = tutor.ask(new_question)\n", + "\n", + "# Compare responses\n", + "tutor.compare_responses()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dca14276-c3d5-493a-aa1f-8dc4c23b144d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}