Merge branch 'main' of github.com:ed-donner/llm_engineering
@@ -0,0 +1,39 @@
OPENAI_MODEL = "gpt-4o-mini"
CLAUDE_MODEL = "claude-3-5-haiku-20241022"

OUTPUT_MAX_TOKEN = 2000

PYTHON_CODE = '''
import math

def pairwise_distance(points_a, points_b):
    """
    Compute the pairwise Euclidean distance between two sets of 3D points.

    Args:
        points_a: list of (x, y, z)
        points_b: list of (x, y, z)
    Returns:
        A 2D list of shape (len(points_a), len(points_b)) representing distances
    """
    distances = []
    for i in range(len(points_a)):
        row = []
        for j in range(len(points_b)):
            dx = points_a[i][0] - points_b[j][0]
            dy = points_a[i][1] - points_b[j][1]
            dz = points_a[i][2] - points_b[j][2]
            d = math.sqrt(dx * dx + dy * dy + dz * dz)
            row.append(d)
        distances.append(row)
    return distances


# Example usage
if __name__ == "__main__":
    import random
    points_a = [(random.random(), random.random(), random.random()) for _ in range(100)]
    points_b = [(random.random(), random.random(), random.random()) for _ in range(100)]
    dists = pairwise_distance(points_a, points_b)
    print(f"Distance[0][0] = {dists[0][0]:.4f}")
'''
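
For context, the optimization the app hopes to receive back typically vectorizes this nested loop with NumPy. A minimal sketch of that kind of rewrite (illustrative only; the helper name is hypothetical and the actual model output will vary):

import numpy as np

def pairwise_distance_fast(points_a, points_b):
    # Broadcast (N, 1, 3) against (1, M, 3) to form all pairwise differences at once
    a = np.asarray(points_a, dtype=float)
    b = np.asarray(points_b, dtype=float)
    diff = a[:, None, :] - b[None, :, :]
    # Returns an (N, M) ndarray rather than a list of lists
    return np.sqrt((diff ** 2).sum(axis=-1))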
223 week4/community-contributions/tsungyulin_code_accelerate/main.py Normal file
@@ -0,0 +1,223 @@
import dotenv
import os
from openai import OpenAI
from anthropic import Anthropic
import gradio as gr

# from .config import *

OPENAI_MODEL = "gpt-4o-mini"
CLAUDE_MODEL = "claude-3-5-haiku-20241022"

OUTPUT_MAX_TOKEN = 2000

CSS = """
body {
    background: #f4f6fa;
    font-family: 'Segoe UI', Roboto, sans-serif;
}

.raw textarea {
    border: 1.5px solid #00FFBF !important;
    box-shadow: 0 0 10px rgba(229, 115, 115, 0.3);
    color: #00FFBF !important;
    font-size: 24px;
}

.optimize textarea {
    border: 1.5px solid #FFBF00 !important;
    box-shadow: 0 0 10px rgba(129, 199, 132, 0.3);
    color: #FFBF00 !important;
    font-size: 24px;
}

button {
    background: linear-gradient(90deg, #2196f3, #00BFFF);
    color: white !important;
    font-weight: bold;
    border-radius: 8px !important;
    transition: all 0.2s ease-in-out;
}

button:hover {
    background: linear-gradient(90deg, #21cbf3, #2196f3);
    transform: scale(1.05);
}

h1 {
    text-align: center;
    color: #1565c0;
    font-size: 38px;
}
"""

PYTHON_CODE = '''
import math

def pairwise_distance(points_a, points_b):
    """
    Compute the pairwise Euclidean distance between two sets of 3D points.

    Args:
        points_a: list of (x, y, z)
        points_b: list of (x, y, z)
    Returns:
        A 2D list of shape (len(points_a), len(points_b)) representing distances
    """
    distances = []
    for i in range(len(points_a)):
        row = []
        for j in range(len(points_b)):
            dx = points_a[i][0] - points_b[j][0]
            dy = points_a[i][1] - points_b[j][1]
            dz = points_a[i][2] - points_b[j][2]
            d = math.sqrt(dx * dx + dy * dy + dz * dz)
            row.append(d)
        distances.append(row)
    return distances


# Example usage
if __name__ == "__main__":
    import random
    points_a = [(random.random(), random.random(), random.random()) for _ in range(100)]
    points_b = [(random.random(), random.random(), random.random()) for _ in range(100)]
    dists = pairwise_distance(points_a, points_b)
    print(f"Distance[0][0] = {dists[0][0]:.4f}")
'''


def main():
    dotenv.load_dotenv(override=True)
    os.environ['OPENAI_API_KEY'] = os.getenv(
        'OPENAI_API_KEY', 'your-key-if-not-using-env')
    os.environ['ANTHROPIC_API_KEY'] = os.getenv(
        'ANTHROPIC_API_KEY', 'your-key-if-not-using-env')

    # codeReviser = CodeAccelerator('openai', os.getenv('OPENAI_API_KEY'))
    codeReviser = CodeAccelerator('anthropic', os.getenv('ANTHROPIC_API_KEY'))

    display_ui(codeReviser)


def safe_exec(code_str):
    import io
    import sys
    import time
    import ast
    # Build an in-memory buffer to capture the executed code's stdout
    stdout_buffer = io.StringIO()
    old_stdout = sys.stdout
    sys.stdout = stdout_buffer

    try:
        # Parse first so syntax errors surface before anything executes
        tree = ast.parse(code_str)
        compiled = compile(tree, filename="<ast>", mode="exec")
        # Run in a single namespace with __name__ set, so top-level functions
        # can call each other and `if __name__ == "__main__":` blocks execute
        exec_globals = {"__name__": "__main__"}
        start = time.time()
        exec(compiled, exec_globals)
        exec_time = time.time() - start
        print(f"This code took {exec_time:.8f} seconds\n")

        # recover sys.stdout
        sys.stdout = old_stdout
        output_text = stdout_buffer.getvalue()
        return output_text

    except Exception as e:
        sys.stdout = old_stdout
        return f"Error: {e}"


def display_ui(codeReviser):
    def _optimize(pythonCode):
        for text in codeReviser.respond(pythonCode):
            yield text.replace("```python", "").replace("```", "")

    with gr.Blocks(css=CSS) as ui:
        gr.Markdown("# ✨Convert Python code for acceleration")
        with gr.Row():
            beforeBlock = gr.Textbox(
                label="raw python code", value=PYTHON_CODE, lines=20, elem_classes=["raw"])
            afterBlock = gr.Textbox(
                label="optimized python code", lines=20, elem_classes=["optimize"])
        with gr.Row():
            convert = gr.Button("Convert code")
        with gr.Row():
            rawRunButton = gr.Button("Run raw code")
            optRunButton = gr.Button("Run optimized code")
        with gr.Row():
            rawOut = gr.TextArea(label="Raw result:", elem_classes=["raw"])
            optimizeOut = gr.TextArea(
                label="Optimize result:", elem_classes=["optimize"])

        convert.click(_optimize, inputs=[beforeBlock], outputs=[afterBlock])
        rawRunButton.click(safe_exec, inputs=[beforeBlock], outputs=[rawOut])
        optRunButton.click(safe_exec, inputs=[afterBlock], outputs=[optimizeOut])

    ui.launch(inbrowser=True)


class CodeAccelerator:

    def __init__(self, frontierModel: str, apiKey):
        self.frontierModel = frontierModel

        if frontierModel == "openai":
            self.llm = OpenAI(api_key=apiKey)
        elif frontierModel == "anthropic":
            self.llm = Anthropic(api_key=apiKey)
        else:
            raise ValueError(f'frontierModel {frontierModel} is invalid.')

    def _getChatTemplate(self, pythonCode):
        _code = pythonCode.strip()

        systemPrompt = '''
        You are an assistant that reimplements Python code for high performance, so that it runs in the fastest possible time on a Windows laptop.
        Respond only with Python code; use comments sparingly and do not provide any explanation other than occasional comments.
        The new Python code must produce identical output in the fastest possible time.
        '''
        userPrompt = f'''
        Rewrite this Python code with the fastest possible implementation that produces identical output in the least time.
        Respond only with Python code; do not explain your work other than a few comments.
        Remember to import all necessary Python packages such as numpy.\n\n

        {_code}
        '''
        return [
            {"role": "system", "content": systemPrompt},
            {"role": "user", "content": userPrompt}
        ]

    def respond(self, pythonCode):
        """Generator that yields the accumulated reply as chunks stream in."""
        chatTemplate = self._getChatTemplate(pythonCode)
        reply = ""
        if self.frontierModel == 'openai':
            stream = self.llm.chat.completions.create(messages=chatTemplate,
                                                      model=OPENAI_MODEL,
                                                      max_tokens=OUTPUT_MAX_TOKEN,
                                                      stream=True)
            for chunk in stream:
                chunkText = chunk.choices[0].delta.content or ""
                reply += chunkText
                yield reply
        elif self.frontierModel == "anthropic":
            stream = self.llm.messages.create(model=CLAUDE_MODEL,
                                              system=chatTemplate[0]['content'],
                                              messages=chatTemplate[1:],
                                              max_tokens=OUTPUT_MAX_TOKEN,
                                              stream=True)

            for chunk in stream:
                chunkText = chunk.delta.text if chunk.type == "content_block_delta" else ""
                reply += chunkText
                yield reply


if __name__ == "__main__":
    main()
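
Note that respond() consumes Anthropic's raw event stream and filters for content_block_delta events. The notebooks below use the SDK's higher-level streaming helper instead, which yields text directly; a minimal sketch of the equivalent loop (the helper name stream_reply is hypothetical):

def stream_reply(client, system_prompt, user_prompt):
    result = client.messages.stream(
        model=CLAUDE_MODEL,
        max_tokens=OUTPUT_MAX_TOKEN,
        system=system_prompt,
        messages=[{"role": "user", "content": user_prompt}],
    )
    reply = ""
    with result as stream:
        # text_stream yields plain text deltas, so no event-type filtering is needed
        for text in stream.text_stream:
            reply += text
            yield reply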
361 week4/community-contributions/w4d3_add_models.ipynb Normal file
@@ -0,0 +1,361 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
   "metadata": {},
   "source": [
    "# Code Generator\n",
    "\n",
    "The requirement: use a Frontier model to generate high performance C++ code from Python code\n",
    "\n",
    "- Try adding Gemini to the Closed Source mix\n",
    "- Try more open-source models such as CodeLlama and StarCoder, and see if you can get CodeGemma to work\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "import io\n",
    "import sys\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "# import google.generativeai\n",
    "from google import genai\n",
    "from google.genai import types\n",
    "import anthropic\n",
    "import ollama\n",
    "from IPython.display import Markdown, display, update_display\n",
    "import gradio as gr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f672e1c-87e9-4865-b760-370fa605e614",
   "metadata": {},
   "outputs": [],
   "source": [
    "# environment\n",
    "\n",
    "load_dotenv(override=True)\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
   "metadata": {},
   "outputs": [],
   "source": [
    "# initialize\n",
    "\n",
    "openai = OpenAI()\n",
    "claude = anthropic.Anthropic()\n",
    "# google.generativeai.configure()\n",
    "client = genai.Client()\n",
    "\n",
    "\n",
    "OPENAI_MODEL = \"gpt-4o\"\n",
    "CLAUDE_MODEL = \"claude-sonnet-4-20250514\"\n",
    "GEMINI_MODEL = 'gemini-2.5-flash'\n",
    "LLAMA_MODEL = \"llama3.2\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6896636f-923e-4a2c-9d6c-fac07828a201",
   "metadata": {},
   "outputs": [],
   "source": [
    "system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
    "system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
    "system_message += \"The C++ response needs to produce an identical output in the fastest possible time. \"\n",
    "system_message += \"Do not include Markdown formatting (```), language tags (cpp), or extra text. \""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def user_prompt_for(python):\n",
    "    user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
    "    user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
    "    user_prompt += \"\"\"Your response must contain neither the leading \"```cpp\" nor the trailing \"```\" marker! \"\"\"\n",
    "    user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ headers such as iomanip.\\n\\n\"\n",
    "    user_prompt += python\n",
    "    return user_prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
   "metadata": {},
   "outputs": [],
   "source": [
    "def messages_for(python):\n",
    "    return [\n",
    "        {\"role\": \"system\", \"content\": system_message},\n",
    "        {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# write to a file called optimized.cpp\n",
    "\n",
    "def write_output(cpp):\n",
    "    with open(\"optimized.cpp\", \"w\") as f:\n",
    "        f.write(cpp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
   "metadata": {},
   "outputs": [],
   "source": [
    "pi = \"\"\"\n",
    "import time\n",
    "\n",
    "def calculate(iterations, param1, param2):\n",
    "    result = 1.0\n",
    "    for i in range(1, iterations+1):\n",
    "        j = i * param1 - param2\n",
    "        result -= (1/j)\n",
    "        j = i * param1 + param2\n",
    "        result += (1/j)\n",
    "    return result\n",
    "\n",
    "start_time = time.time()\n",
    "result = calculate(100_000_000, 4, 1) * 4\n",
    "end_time = time.time()\n",
    "\n",
    "print(f\"Result: {result:.12f}\")\n",
    "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
   "metadata": {},
   "outputs": [],
   "source": [
    "python_hard = \"\"\"# Be careful to support large number sizes\n",
    "\n",
    "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
    "    value = seed\n",
    "    while True:\n",
    "        value = (a * value + c) % m\n",
    "        yield value\n",
    "\n",
    "def max_subarray_sum(n, seed, min_val, max_val):\n",
    "    lcg_gen = lcg(seed)\n",
    "    random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
    "    max_sum = float('-inf')\n",
    "    for i in range(n):\n",
    "        current_sum = 0\n",
    "        for j in range(i, n):\n",
    "            current_sum += random_numbers[j]\n",
    "            if current_sum > max_sum:\n",
    "                max_sum = current_sum\n",
    "    return max_sum\n",
    "\n",
    "def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
    "    total_sum = 0\n",
    "    lcg_gen = lcg(initial_seed)\n",
    "    for _ in range(20):\n",
    "        seed = next(lcg_gen)\n",
    "        total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
    "    return total_sum\n",
    "\n",
    "# Parameters\n",
    "n = 10000  # Number of random numbers\n",
    "initial_seed = 42  # Initial seed for the LCG\n",
    "min_val = -10  # Minimum value of random numbers\n",
    "max_val = 10  # Maximum value of random numbers\n",
    "\n",
    "# Timing the function\n",
    "import time\n",
    "start_time = time.time()\n",
    "result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
    "end_time = time.time()\n",
    "\n",
    "print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
    "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gpt(python):\n",
    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
    "    reply = \"\"\n",
    "    for chunk in stream:\n",
    "        fragment = chunk.choices[0].delta.content or \"\"\n",
    "        reply += fragment\n",
    "        yield reply"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8669f56b-8314-4582-a167-78842caea131",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_claude(python):\n",
    "    result = claude.messages.stream(\n",
    "        model=CLAUDE_MODEL,\n",
    "        max_tokens=2000,\n",
    "        system=system_message,\n",
    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
    "    )\n",
    "    reply = \"\"\n",
    "    with result as stream:\n",
    "        for text in stream.text_stream:\n",
    "            reply += text\n",
    "            yield reply"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97205162",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gemini(python):\n",
    "    response = client.models.generate_content_stream(\n",
    "        model=GEMINI_MODEL,\n",
    "        config=types.GenerateContentConfig(\n",
    "            system_instruction=system_message),\n",
    "        contents=user_prompt_for(python)\n",
    "    )\n",
    "\n",
    "    reply = \"\"\n",
    "    for chunk in response:\n",
    "        fragment = chunk.text or \"\"\n",
    "        reply += fragment\n",
    "        yield reply\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f94b13e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_llama_local(python):\n",
    "    stream = ollama.chat(\n",
    "        model='llama3.2',\n",
    "        messages=messages_for(python),\n",
    "        stream=True,\n",
    "    )\n",
    "\n",
    "    reply = \"\"\n",
    "    # Iterate through the streamed chunks and accumulate the content\n",
    "    for chunk in stream:\n",
    "        if 'content' in chunk['message']:\n",
    "            fragment = chunk['message']['content']\n",
    "            reply += fragment\n",
    "            yield reply\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def optimize(python, model):\n",
    "    if model==\"GPT\":\n",
    "        result = stream_gpt(python)\n",
    "    elif model==\"Claude\":\n",
    "        result = stream_claude(python)\n",
    "    elif model==\"Gemini\":\n",
    "        result = stream_gemini(python)\n",
    "    elif model==\"Llama\":\n",
    "        result = stream_llama_local(python)\n",
    "    else:\n",
    "        raise ValueError(\"Unknown model\")\n",
    "    for stream_so_far in result:\n",
    "        yield stream_so_far"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
   "metadata": {},
   "outputs": [],
   "source": [
    "with gr.Blocks() as ui:\n",
    "    with gr.Row():\n",
    "        python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
    "        cpp = gr.Textbox(label=\"C++ code, copy it here: https://www.programiz.com/cpp-programming/online-compiler/\", lines=10)\n",
    "    with gr.Row():\n",
    "        model = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"Llama\"], label=\"Select model\", value=\"GPT\")\n",
    "        convert = gr.Button(\"Convert code\")\n",
    "\n",
    "    convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
    "\n",
    "ui.launch(inbrowser=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
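
The notebook writes the generated C++ to optimized.cpp and suggests pasting it into an online compiler. With a local toolchain you can compile and run it directly; a minimal sketch (assumes clang++ on PATH, and the helper name and flags are illustrative; g++ works the same way):

import subprocess

def compile_and_run(cpp_path="optimized.cpp", exe_path="./optimized"):
    # -O3 matters here: the exercise compares optimized C++ against the Python baseline
    subprocess.run(["clang++", "-O3", "-std=c++17", "-o", exe_path, cpp_path], check=True)
    result = subprocess.run([exe_path], capture_output=True, text=True, check=True)
    return result.stdout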
@@ -0,0 +1,499 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
   "metadata": {},
   "source": [
    "# Trading Code Generator\n",
    "\n",
    "A code generator that writes trading code to buy and sell equities in a simulated environment, based on a given API\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "import sys\n",
    "import io\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "from google import genai\n",
    "from google.genai import types\n",
    "import anthropic\n",
    "import ollama\n",
    "import gradio as gr\n",
    "import requests\n",
    "from typing import Any"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f672e1c-87e9-4865-b760-370fa605e614",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# environment\n",
    "\n",
    "load_dotenv(override=True)\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# initialize\n",
    "\n",
    "openai = OpenAI()\n",
    "claude = anthropic.Anthropic()\n",
    "client = genai.Client()\n",
    "\n",
    "\n",
    "OPENAI_MODEL = \"gpt-4o\"\n",
    "CLAUDE_MODEL = \"claude-sonnet-4-20250514\"\n",
    "GEMINI_MODEL = 'gemini-2.5-flash'\n",
    "LLAMA_MODEL = \"llama3.2\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36b0a6f6",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "system_message = \"\"\"\n",
    "You are an effective programming assistant specialized in generating Python code based on the inputs.\n",
    "Respond only with Python code; use comments sparingly and do not provide any explanation other than occasional comments.\n",
    "Do not include Markdown formatting (```), language tags (python), or extra text.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def get_user_prompt_for_trade_code_generation(currency, wallet_balance):\n",
    "    wallet_balance = str(wallet_balance)\n",
    "\n",
    "    user_prompt = \"\"\"\n",
    "    Create a simple Crypto trading engine Python code.\n",
    "    The engine will sell or buy the given crypto currency against USDT (Tether) based on the available wallet balance.\n",
    "    This should be a simple Python code, not a function.\n",
    "    The currency is: {}\n",
    "    The wallet balance is: {}\"\"\".format(currency, wallet_balance)\n",
    "    user_prompt += \"\"\"\n",
    "    Output will be a text containing the following:\n",
    "    - advice to sell or buy\n",
    "    - amount in USDT\n",
    "    Rules you have to apply in the code:\n",
    "    - compose symbol: convert the input `crypto_currency` argument to upper case and concatenate it to the string \"USDT\"\n",
    "    - compose url passing the previously composed symbol: `url = f\"https://data-api.binance.vision/api/v3/ticker/24hr?symbol={symbol}\"`\n",
    "    - call the api with this url, expecting to get the following json response, for example:\n",
    "      {'symbol': 'BTCUSDT',\n",
    "      'priceChange': '1119.99000000',\n",
    "      'priceChangePercent': '0.969',\n",
    "      'weightedAvgPrice': '116314.23644195',\n",
    "      'prevClosePrice': '115600.00000000',\n",
    "      'lastPrice': '116719.99000000',\n",
    "      'lastQty': '0.05368000',\n",
    "      'bidPrice': '116719.99000000',\n",
    "      'bidQty': '2.81169000',\n",
    "      'askPrice': '116720.00000000',\n",
    "      'askQty': '3.46980000',\n",
    "      'openPrice': '115600.00000000',\n",
    "      'highPrice': '117286.73000000',\n",
    "      'lowPrice': '114737.11000000',\n",
    "      'volume': '12500.51369000',\n",
    "      'quoteVolume': '1453987704.98443060',\n",
    "      'openTime': 1758015394001,\n",
    "      'closeTime': 1758101794001,\n",
    "      'firstId': 5236464586,\n",
    "      'lastId': 5238628513,\n",
    "      'count': 2163928}\n",
    "    - build logic based on the retrieved values which decides whether the engine should sell or buy the given crypto currency\n",
    "    - in the logic the code should also decide the confidence level of selling or buying\n",
    "    - if the confidence level is high, the `amount` should be higher (closer to the `current_wallet_balance`)\n",
    "    - if the confidence level is lower, then the amount should be lower as well\n",
    "    - error handling:\n",
    "      - if the api call returns a json having `code` and `msg` keys in it (e.g. 'code': -1121, 'msg': 'Invalid symbol.') then handle this error message\n",
    "    Response rule: in your response do not include Markdown formatting (```), language tags (python), or extra text.\n",
    "    \"\"\"\n",
    "    return user_prompt\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5030fdf5",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def get_user_prompt_for_docstring_generation(python_code):\n",
    "    return f\"\"\"\n",
    "    Consider the following Python code: \\n\\n\n",
    "    {python_code} \\n\\n\n",
    "\n",
    "    Generate a docstring comment for this code and return it alongside the Python code. \\n\n",
    "    Response rule: in your response do not include Markdown formatting (```), language tags (python), or extra text.\n",
    "\n",
    "    \"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8dc065c2",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def trade_gpt(currency, wallet_balance):\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=OPENAI_MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": system_message},\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_trade_code_generation(\n",
    "                currency,\n",
    "                wallet_balance\n",
    "            )}\n",
    "        ]\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b402c67",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def code_comment_gpt(python_code):\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=OPENAI_MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": system_message},\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_docstring_generation(python_code)}\n",
    "        ]\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0dc80287",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def trade_claude(currency, wallet_balance):\n",
    "    message = claude.messages.create(\n",
    "        model=CLAUDE_MODEL,\n",
    "        max_tokens=2000,\n",
    "        temperature=0.7,\n",
    "        system=system_message,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_trade_code_generation(\n",
    "                currency,\n",
    "                wallet_balance\n",
    "            )},\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    return message.content[0].text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "90eb9547",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def code_comment_claude(python_code):\n",
    "    message = claude.messages.create(\n",
    "        model=CLAUDE_MODEL,\n",
    "        max_tokens=2000,\n",
    "        temperature=0.7,\n",
    "        system=system_message,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_docstring_generation(python_code)},\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    return message.content[0].text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b94fbd55",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def trade_gemini(currency, wallet_balance):\n",
    "    response = client.models.generate_content(\n",
    "        model=GEMINI_MODEL,\n",
    "        config=types.GenerateContentConfig(\n",
    "            system_instruction=system_message),\n",
    "        contents=get_user_prompt_for_trade_code_generation(\n",
    "            currency,\n",
    "            wallet_balance\n",
    "        )\n",
    "    )\n",
    "\n",
    "    return response.text\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f83ef7b8",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def code_comment_gemini(python_code):\n",
    "    response = client.models.generate_content(\n",
    "        model=GEMINI_MODEL,\n",
    "        config=types.GenerateContentConfig(\n",
    "            system_instruction=system_message),\n",
    "        contents=get_user_prompt_for_docstring_generation(python_code)\n",
    "    )\n",
    "\n",
    "    return response.text\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6737962d",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def trade_llama(currency, wallet_balance):\n",
    "    completion = ollama.chat(\n",
    "        model=LLAMA_MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_trade_code_generation(\n",
    "                currency,\n",
    "                wallet_balance\n",
    "            )},\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    return completion['message']['content']\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b815aa07",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def code_comment_llama(python_code):\n",
    "    completion = ollama.chat(\n",
    "        model=LLAMA_MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": get_user_prompt_for_docstring_generation(python_code)},\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    return completion['message']['content']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9e07437",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def generate_python_code(input_model, currency, wallet_balance):\n",
    "    # Map to the functions themselves so only the selected model is called\n",
    "    model_mapping = {\"GPT\": trade_gpt,\n",
    "                     \"Claude\": trade_claude,\n",
    "                     \"Gemini\": trade_gemini,\n",
    "                     \"Llama\": trade_llama}\n",
    "    try:\n",
    "        return model_mapping[input_model](currency, wallet_balance)\n",
    "    except KeyError as e:\n",
    "        print(f\"{e}: {input_model} is not valid\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "016fed0e",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def comment_python_code(input_model, python_code):\n",
    "    # Map to the functions themselves so only the selected model is called\n",
    "    model_mapping = {\"GPT\": code_comment_gpt,\n",
    "                     \"Claude\": code_comment_claude,\n",
    "                     \"Gemini\": code_comment_gemini,\n",
    "                     \"Llama\": code_comment_llama\n",
    "                     }\n",
    "    try:\n",
    "        return model_mapping[input_model](python_code)\n",
    "    except KeyError as e:\n",
    "        print(f\"{e}: {input_model} is not valid\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e224a715",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "def execute_python(code):\n",
    "    # Note: exec runs arbitrary generated code; only use in a trusted environment\n",
    "    code = code.replace('```python\\n','').replace('```','')\n",
    "    try:\n",
    "        output = io.StringIO()\n",
    "        sys.stdout = output\n",
    "        exec(code)\n",
    "    finally:\n",
    "        sys.stdout = sys.__stdout__\n",
    "    return output.getvalue()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ea96a88d",
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "with gr.Blocks() as ui:\n",
    "    gr.Markdown(\"\"\"It generates trading Python code, which recommends whether to sell or buy a given crypto currency at its current price.\n",
    "    Based on the confidence level of the prediction, it recommends what amount to place from your available wallet balance.\"\"\")\n",
    "    with gr.Row():\n",
    "        crypto_currency = gr.Dropdown([\"BTC\", \"ETH\", \"SOL\"], label=\"The crypto currency\")\n",
    "        wallet_balance = gr.Number(label=\"Wallet balance (USDT)\")\n",
    "        model = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"Llama\"], label=\"Select model\", value=\"GPT\")\n",
    "    with gr.Row():\n",
    "        generate_python_code_bt = gr.Button(\"Generate Python code\")\n",
    "    with gr.Row():\n",
    "        with gr.Column():\n",
    "            python = gr.TextArea(label=\"Python Code\")\n",
    "            python_comment = gr.Button(\"Comment Python code\")\n",
    "            python_run = gr.Button(\"Run Python code\")\n",
    "    with gr.Row():\n",
    "        result_out = gr.TextArea(label=\"Trading advice\")\n",
    "\n",
    "    generate_python_code_bt.click(generate_python_code, inputs=[model, crypto_currency, wallet_balance], outputs=[python])\n",
    "    python_comment.click(comment_python_code, inputs=[model, python], outputs=python)\n",
    "    python_run.click(execute_python, inputs=[python], outputs=result_out)\n",
    "\n",
    "ui.launch(inbrowser=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "",
   "version": ""
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
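
The user prompt above fully specifies the program the models are asked to write: compose the symbol, fetch the 24hr ticker, decide buy or sell with a confidence level, and handle the error JSON. For reference, a minimal hand-written sketch of that spec (the function name, momentum rule, and thresholds are illustrative assumptions, not trading advice):

import requests

def trade_advice(crypto_currency, current_wallet_balance):
    symbol = crypto_currency.upper() + "USDT"
    url = f"https://data-api.binance.vision/api/v3/ticker/24hr?symbol={symbol}"
    data = requests.get(url).json()
    # The API signals errors such as an invalid symbol with 'code' and 'msg' keys
    if "code" in data and "msg" in data:
        return f"API error {data['code']}: {data['msg']}"
    change = float(data["priceChangePercent"])
    # Illustrative rule: a stronger 24h move means higher confidence and a larger amount
    confidence = min(abs(change) / 5.0, 1.0)
    action = "buy" if change > 0 else "sell"
    amount = round(current_wallet_balance * confidence, 2)
    return f"Advice: {action} {amount} USDT (confidence {confidence:.0%})"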
288 week4/community-contributions/w4d3_unit_test.ipynb Normal file
@@ -0,0 +1,288 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
   "metadata": {},
   "source": [
    "# Unit test Generator\n",
    "\n",
    "Create unit tests for the Python code"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "from google import genai\n",
    "from google.genai import types\n",
    "import anthropic\n",
    "import ollama\n",
    "import gradio as gr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f672e1c-87e9-4865-b760-370fa605e614",
   "metadata": {},
   "outputs": [],
   "source": [
    "# environment\n",
    "\n",
    "load_dotenv(override=True)\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
    "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
   "metadata": {},
   "outputs": [],
   "source": [
    "# initialize\n",
    "\n",
    "openai = OpenAI()\n",
    "claude = anthropic.Anthropic()\n",
    "client = genai.Client()\n",
    "\n",
    "\n",
    "OPENAI_MODEL = \"gpt-4o\"\n",
    "CLAUDE_MODEL = \"claude-sonnet-4-20250514\"\n",
    "GEMINI_MODEL = 'gemini-2.5-flash'\n",
    "LLAMA_MODEL = \"llama3.2\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6896636f-923e-4a2c-9d6c-fac07828a201",
   "metadata": {},
   "outputs": [],
   "source": [
    "system_message = \"\"\"\n",
    "You are an effective programming assistant specialized in generating Python code based on the inputs.\n",
    "Respond only with Python code; use comments sparingly and do not provide any explanation other than occasional comments.\n",
    "Do not include Markdown formatting (```), language tags (python), or extra text.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def user_prompt_for_unit_test(python):\n",
    "    user_prompt = f\"\"\"\n",
    "    Consider the following Python code: \\n\\n\n",
    "    {python} \\n\\n\n",
    "\n",
    "    Generate a unit test for this code and return it alongside the Python code. \\n\n",
    "    Response rule: in your response do not include Markdown formatting (```), language tags (python), or extra text.\n",
    "\n",
    "    \"\"\"\n",
    "    return user_prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
   "metadata": {},
   "outputs": [],
   "source": [
    "def messages_for_unit_test(python):\n",
    "    return [\n",
    "        {\"role\": \"system\", \"content\": system_message},\n",
    "        {\"role\": \"user\", \"content\": user_prompt_for_unit_test(python)}\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
   "metadata": {},
   "outputs": [],
   "source": [
    "python_hard = \"\"\"\n",
    "\n",
    "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
    "    value = seed\n",
    "    while True:\n",
    "        value = (a * value + c) % m\n",
    "        yield value\n",
    "\n",
    "def max_subarray_sum(n, seed, min_val, max_val):\n",
    "    lcg_gen = lcg(seed)\n",
    "    random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
    "    max_sum = float('-inf')\n",
    "    for i in range(n):\n",
    "        current_sum = 0\n",
    "        for j in range(i, n):\n",
    "            current_sum += random_numbers[j]\n",
    "            if current_sum > max_sum:\n",
    "                max_sum = current_sum\n",
    "    return max_sum\n",
    "\n",
    "def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
    "    total_sum = 0\n",
    "    lcg_gen = lcg(initial_seed)\n",
    "    for _ in range(20):\n",
    "        seed = next(lcg_gen)\n",
    "        total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
    "    return total_sum\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gpt(python):\n",
    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for_unit_test(python), stream=True)\n",
    "    reply = \"\"\n",
    "    for chunk in stream:\n",
    "        fragment = chunk.choices[0].delta.content or \"\"\n",
    "        reply += fragment\n",
    "        yield reply"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8669f56b-8314-4582-a167-78842caea131",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_claude(python):\n",
    "    result = claude.messages.stream(\n",
    "        model=CLAUDE_MODEL,\n",
    "        max_tokens=2000,\n",
    "        system=system_message,\n",
    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for_unit_test(python)}],\n",
    "    )\n",
    "    reply = \"\"\n",
    "    with result as stream:\n",
    "        for text in stream.text_stream:\n",
    "            reply += text\n",
    "            yield reply"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97205162",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gemini(python):\n",
    "    response = client.models.generate_content_stream(\n",
    "        model=GEMINI_MODEL,\n",
    "        config=types.GenerateContentConfig(\n",
    "            system_instruction=system_message),\n",
    "        contents=user_prompt_for_unit_test(python)\n",
    "    )\n",
    "\n",
    "    reply = \"\"\n",
    "    for chunk in response:\n",
    "        fragment = chunk.text or \"\"\n",
    "        reply += fragment\n",
    "        yield reply\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f94b13e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_llama_local(python):\n",
    "    stream = ollama.chat(\n",
    "        model='llama3.2',\n",
    "        messages=messages_for_unit_test(python),\n",
    "        stream=True,\n",
    "    )\n",
    "\n",
    "    reply = \"\"\n",
    "    # Iterate through the streamed chunks and accumulate the content\n",
    "    for chunk in stream:\n",
    "        if 'content' in chunk['message']:\n",
    "            fragment = chunk['message']['content']\n",
    "            reply += fragment\n",
    "            yield reply\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_unit_test(python, model):\n",
    "    if model==\"GPT\":\n",
    "        result = stream_gpt(python)\n",
    "    elif model==\"Claude\":\n",
    "        result = stream_claude(python)\n",
    "    elif model==\"Gemini\":\n",
    "        result = stream_gemini(python)\n",
    "    elif model==\"Llama\":\n",
    "        result = stream_llama_local(python)\n",
    "    else:\n",
    "        raise ValueError(\"Unknown model\")\n",
    "    for stream_so_far in result:\n",
    "        yield stream_so_far"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
   "metadata": {},
   "outputs": [],
   "source": [
    "with gr.Blocks() as ui:\n",
    "    with gr.Row():\n",
    "        python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
    "        unit_test = gr.Textbox(label=\"Unit test\", lines=10)\n",
    "    with gr.Row():\n",
    "        model = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"Llama\"], label=\"Select model\", value=\"GPT\")\n",
    "        generate_ut = gr.Button(\"Generate Unit tests\")\n",
    "\n",
    "    generate_ut.click(generate_unit_test, inputs=[python, model], outputs=[unit_test])\n",
    "\n",
    "ui.launch(inbrowser=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}