Files
LLM_Engineering_OLD/week1/community-contributions/Week1-Exercise-Gemini-With-GenAI-SDK.ipynb

204 lines
6.6 KiB
Plaintext

{
"cells": [
{
"cell_type": "markdown",
"id": "6e19458c-4b0e-40f6-bd4f-4d9c80ea671b",
"metadata": {},
"source": [
"# End of Week 1 - Exercise - Using Gemini API with GenAI SDK"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1a125bb-737f-41a5-8dd1-626cd8efe6e2",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from google import genai\n",
"from google.genai import types\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "acf37451-3732-455b-a906-87f66053b018",
"metadata": {},
"outputs": [],
"source": [
"# Load API Key - For Gemini it automatically takes the api key from env file if we save the key using GOOGLE_API_KEY keyword\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"# Classify the key into one of three states, then report once at the end\n",
"if not api_key:\n",
"    status_message = \"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\"\n",
"elif api_key != api_key.strip():\n",
"    status_message = \"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\"\n",
"else:\n",
"    status_message = \"API key found and looks good so far!\"\n",
"print(status_message)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c2fccf9-e419-431e-97fc-a42fcf67c633",
"metadata": {},
"outputs": [],
"source": [
"# Initialize Google Client\n",
"# The api_key parameter is passed explicitly for clarity, but it's optional: genai.Client automatically reads GOOGLE_API_KEY from the environment\n",
"\n",
"try:\n",
"    client = genai.Client(api_key=api_key)\n",
"    print(\"Google GenAI Client initialized successfully!\")\n",
"except Exception as e:\n",
"    print(f\"Error initializing GenAI Client: {e}\")\n",
"    print(\"Ensure your GOOGLE_API_KEY is correctly set as an environment variable.\")\n",
"    # Re-raise so the cell fails visibly; exit() in a notebook would shut down the kernel\n",
"    raise"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5b918afd-ed3b-49d1-85f1-6e549faec66e",
"metadata": {},
"outputs": [],
"source": [
"# Get list of models\n",
"# Membership test instead of an inner equality loop: prints each model name at\n",
"# most once, even if an action were ever listed more than once.\n",
"print(\"List of models that support generateContent:\\n\")\n",
"for m in client.models.list():\n",
"    if \"generateContent\" in m.supported_actions:\n",
"        print(m.name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "791da71e-35a5-4a15-90c7-93ae22e40232",
"metadata": {},
"outputs": [],
"source": [
"# Use the stable GA alias rather than a dated preview ID\n",
"# (gemini-2.5-flash-preview-05-20): preview IDs are retired once the model\n",
"# reaches general availability and then start returning errors.\n",
"MODEL_GEMINI = 'gemini-2.5-flash'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a536e25-060e-4f93-bbd7-d80195620bba",
"metadata": {},
"outputs": [],
"source": [
"# System Definitions\n",
"\n",
"# Implicit string concatenation inside the parentheses - the previous\n",
"# backslash line-continuations were *inside* the string literal, which embedded\n",
"# the source indentation into the prompt; also fixes the missing space in\n",
"# \"efficiency,and\".\n",
"system_instruction_prompt = (\n",
"    \"You are an expert Python programming assistant. Your goal is to identify common coding errors, \"\n",
"    \"suggest improvements for readability and efficiency, and provide corrected code snippets. \"\n",
"    \"Always format code blocks using Markdown. \"\n",
"    \"Be concise but thorough. Focus on the provided code and context.\"\n",
")\n",
"\n",
"generate_content_config = types.GenerateContentConfig(system_instruction=system_instruction_prompt)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2fc2a778-f175-44ec-9535-f81deeca7f1a",
"metadata": {},
"outputs": [],
"source": [
"# Main program to get user input and then use model to respond.\n",
"\n",
"MAX_HISTORY_MESSAGES = 6  # keep only the most recent user/model turns in the request payload\n",
"GEMINI_HEADER = \"**Gemini:**\\n\\n\"  # display-only prefix, stripped before saving to history\n",
"conversation_contents = []\n",
"\n",
"print(\"\\n--- Start Chat with Gemini Python Assistant ---\")\n",
"print(\"Type 'Done' to exit the conversation.\")\n",
"\n",
"while True:\n",
"    user_input = input(\"You: \").strip()\n",
"\n",
"    if user_input.lower() == \"done\":\n",
"        print(\"\\nExiting chat. Goodbye!\")\n",
"        break\n",
"\n",
"    if not user_input:\n",
"        print(\"Please enter a question or 'Done' to exit.\")\n",
"        continue\n",
"\n",
"    # Build the user turn outside the try block so a construction failure\n",
"    # cannot trigger the rollback below.\n",
"    user_message_content = types.Content(\n",
"        role=\"user\",\n",
"        parts=[types.Part.from_text(text=user_input)]\n",
"    )\n",
"    conversation_contents.append(user_message_content)\n",
"\n",
"    try:\n",
"        stream_response = client.models.generate_content_stream(\n",
"            model=MODEL_GEMINI,\n",
"            contents=conversation_contents,\n",
"            config=generate_content_config,\n",
"        )\n",
"\n",
"        # Stream chunks into one Markdown display that updates in place\n",
"        model_full_response_text = GEMINI_HEADER\n",
"        current_display_handle = display(Markdown(\"\"), display_id=True)\n",
"\n",
"        for chunk in stream_response:\n",
"            model_full_response_text += chunk.text or ''\n",
"            update_display(Markdown(model_full_response_text), display_id=current_display_handle.display_id)\n",
"\n",
"        # Add model's FULL response (without the display header) to conversation history\n",
"        model_message_content = types.Content(\n",
"            role=\"model\",\n",
"            parts=[types.Part.from_text(text=model_full_response_text.removeprefix(GEMINI_HEADER))]\n",
"        )\n",
"        conversation_contents.append(model_message_content)\n",
"\n",
"        conversation_contents = conversation_contents[-MAX_HISTORY_MESSAGES:]\n",
"\n",
"    except Exception as e:\n",
"        print(f\"\\nAn error occurred during interaction: {e}\")\n",
"        # Roll back only the user message appended this iteration; an\n",
"        # unconditional pop() could drop an older model reply if the failure\n",
"        # happened before the append.\n",
"        if conversation_contents and conversation_contents[-1] is user_message_content:\n",
"            conversation_contents.pop()\n",
"        print(\"Please try asking your question again or type 'Done' to exit.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a86c3e5b-516b-42dc-994f-9dfa75c610cc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}