Merge pull request #645 from rohanjangala/community-contributions-branch
Added week 1 exercise w/ free endpoints to community contributions
188
week1/community-contributions/week1_exercise_FreeTier.ipynb
Normal file
@@ -0,0 +1,188 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
   "metadata": {},
   "source": [
    "# This is a fork of the end of week 1 exercise - with only free-tier/local endpoints\n",
    "\n",
    "To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
    "and responds with an explanation. This is a tool that you will be able to use yourself during the course!\n",
    "\n",
    "- For a free GitHub token, visit https://github.com/settings/tokens. The nice thing about GitHub Models is that you can try out different models through the same endpoint. We will use an OpenAI model, as intended for this exercise. You can browse the marketplace at https://github.com/marketplace/models to see all the available ones.\n",
    "- Don't forget to save this token in your .env file as GITHUB_TOKEN='your-key' - that is the variable name the code below reads!\n",
    "- Also, please run ollama run llama3.2 in your terminal, or !ollama run llama3.2 in a code cell, to install the model if you haven't already."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1070317-3ed9-4659-abe3-828943230e03",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "from openai import OpenAI\n",
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from IPython.display import Markdown, display, update_display"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "156f046f-3770-4c39-b576-ec9d2cb42525",
   "metadata": {},
   "outputs": [],
   "source": [
    "# load environment variables\n",
    "load_dotenv(override=True)"
   ]
  },
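  {
   "cell_type": "code",
   "execution_count": null,
   "id": "github-token-sanity-check",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check (not part of the original exercise): confirm the GitHub token\n",
    "# was actually picked up from your .env file before making any API calls.\n",
    "# Assumes you saved it as GITHUB_TOKEN='your-key' as described above.\n",
    "token_check = os.getenv('GITHUB_TOKEN')\n",
    "if token_check:\n",
    "    print(f\"GITHUB_TOKEN found, starts with: {token_check[:4]}...\")\n",
    "else:\n",
    "    print(\"GITHUB_TOKEN not found - please check your .env file\")"
   ]
  },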
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants\n",
    "\n",
    "MODEL_GPT = 'gpt-4o-mini'\n",
    "MODEL_LLAMA = 'llama3.2'\n",
    "\n",
    "github_endpoint = \"https://models.github.ai/inference\"\n",
    "ollama_endpoint = \"http://localhost:11434/v1\"\n",
    "\n",
    "GITHUB_API_KEY = os.getenv('GITHUB_TOKEN')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# set up the two API clients: GitHub Models and local Ollama\n",
    "openai = OpenAI(\n",
    "    base_url = github_endpoint,\n",
    "    api_key = GITHUB_API_KEY\n",
    ")\n",
    "\n",
    "ollama = OpenAI(\n",
    "    base_url = ollama_endpoint,\n",
    "    api_key = \"llama\"\n",
    ")"
   ]
  },
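  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ollama-models-check",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional check (not part of the original exercise): Ollama exposes an OpenAI-compatible\n",
    "# API, so listing the locally available models is a quick way to confirm the server is\n",
    "# running and llama3.2 has been pulled. Assumes Ollama is serving on localhost:11434.\n",
    "for m in ollama.models.list():\n",
    "    print(m.id)"
   ]
  },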
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
   "metadata": {},
   "outputs": [],
   "source": [
    "# here is the question; type over this to ask something new\n",
    "\n",
    "question = \"\"\"\n",
    "Please explain what this code does and why:\n",
    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "33ba4488-dc88-4caf-85d9-8bdb23a60de2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get gpt-4o-mini to answer\n",
    "def stream_message(prompt):\n",
    "    stream = openai.chat.completions.create(\n",
    "        model = MODEL_GPT,\n",
    "        messages = [{\"role\":\"user\", \"content\": prompt}],\n",
    "        stream=True,\n",
    "    )\n",
    "    response = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        if chunk.choices:\n",
    "            response += chunk.choices[0].delta.content or ''\n",
    "            update_display(Markdown(response), display_id=display_handle.display_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "43ba38d5-f62a-4f8d-ba66-55425c8b0d64",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "stream_message(question)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Get Llama 3.2 to answer\n",
    "response = ollama.chat.completions.create(\n",
    "    model = MODEL_LLAMA,\n",
    "    messages = [{\"role\":\"user\", \"content\": question}]\n",
    ")\n",
    "response2 = response.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2a85c12-4a8d-4539-af3f-a4d76375105b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "display(Markdown(response2))"
   ]
  },
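  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ollama-streaming-extra",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional extra (not part of the original exercise): because the Ollama endpoint is\n",
    "# OpenAI-compatible, the same streaming pattern used for gpt-4o-mini above also works\n",
    "# for the local model - a sketch, assuming llama3.2 has already been pulled.\n",
    "def stream_message_ollama(prompt):\n",
    "    stream = ollama.chat.completions.create(\n",
    "        model = MODEL_LLAMA,\n",
    "        messages = [{\"role\":\"user\", \"content\": prompt}],\n",
    "        stream=True,\n",
    "    )\n",
    "    reply = \"\"\n",
    "    handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        if chunk.choices:\n",
    "            reply += chunk.choices[0].delta.content or ''\n",
    "            update_display(Markdown(reply), display_id=handle.display_id)\n",
    "\n",
    "stream_message_ollama(question)"
   ]
  },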
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c6f78a8-b3de-4327-9148-6eb385c23af0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "agents",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}