Launching refreshed version of LLM Engineering weeks 1-4 - see README
week4/day3.ipynb
@@ -18,18 +18,15 @@
 "<table style=\"margin: 0; text-align: left;\">\n",
 " <tr>\n",
 " <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
-" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
+" <img src=\"../assets/resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
 " </td>\n",
 " <td>\n",
 " <h2 style=\"color:#f71;\">Reminder: fetch latest code</h2>\n",
 " <span style=\"color:#f71;\">I'm continually improving these labs, adding more examples and exercises.\n",
 " At the start of each week, it's worth checking you have the latest code.<br/>\n",
 " First do a <a href=\"https://chatgpt.com/share/6734e705-3270-8012-a074-421661af6ba9\">git pull and merge your changes as needed</a>. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
-" After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
-" <code>conda env update --f environment.yml --prune</code><br/>\n",
-" Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
-" <code>pip install -r requirements.txt</code>\n",
-" <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
+" After you've pulled the code, from the llm_engineering directory, in a Cursor Terminal, run:<br/>\n",
+" <code>uv sync</code><br/>\n",
 " </span>\n",
 " </td>\n",
 " </tr>\n",
@@ -44,12 +41,12 @@
 "<table style=\"margin: 0; text-align: left;\">\n",
 " <tr>\n",
 " <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
-" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
+" <img src=\"../assets/important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
 " </td>\n",
 " <td>\n",
 " <h1 style=\"color:#900;\">Important Note</h1>\n",
 " <span style=\"color:#900;\">\n",
-" In this lab, I use GPT-4o and Claude-3.5-Sonnet, which are the slightly higher priced models. The costs are still low, but if you'd prefer to keep costs ultra low, please make the suggested switches to the models (3 cells down from here).\n",
+" In this lab, I use the high-end models GPT-5, Claude Sonnet 4.5, Gemini 2.5 Pro and Grok 4, which are slightly higher priced. The costs are still low, but if you'd prefer to keep costs ultra low, please pick lower cost models like gpt-5-nano.\n",
 " </span>\n",
 " </td>\n",
 " </tr>\n",
@@ -66,15 +63,10 @@
 "# imports\n",
 "\n",
 "import os\n",
-"import io\n",
-"import sys\n",
 "from dotenv import load_dotenv\n",
 "from openai import OpenAI\n",
-"import google.generativeai\n",
-"import anthropic\n",
-"from IPython.display import Markdown, display, update_display\n",
-"import gradio as gr\n",
-"import subprocess"
+"import subprocess\n",
+"from IPython.display import Markdown, display"
 ]
 },
 {
@@ -84,11 +76,51 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "# environment\n",
 "\n",
 "load_dotenv(override=True)\n",
-"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
-"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')"
+"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+"grok_api_key = os.getenv('GROK_API_KEY')\n",
+"\n",
+"if openai_api_key:\n",
+" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+"else:\n",
+" print(\"OpenAI API Key not set\")\n",
+" \n",
+"if anthropic_api_key:\n",
+" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
+"else:\n",
+" print(\"Anthropic API Key not set (and this is optional)\")\n",
+"\n",
+"if google_api_key:\n",
+" print(f\"Google API Key exists and begins {google_api_key[:2]}\")\n",
+"else:\n",
+" print(\"Google API Key not set (and this is optional)\")\n",
+"\n",
+"if grok_api_key:\n",
+" print(f\"Grok API Key exists and begins {grok_api_key[:4]}\")\n",
+"else:\n",
+" print(\"Grok API Key not set (and this is optional)\")"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "59863df1",
+"metadata": {},
+"outputs": [],
+"source": [
+"# Connect to client libraries\n",
+"\n",
+"openai = OpenAI()\n",
+"\n",
+"anthropic_url = \"https://api.anthropic.com/v1/\"\n",
+"gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
+"grok_url = \"https://api.x.ai/v1\"\n",
+"\n",
+"anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)\n",
+"gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)\n",
+"grok = OpenAI(api_key=grok_api_key, base_url=grok_url)"
+]
+},
 {
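Editor's note: since every provider here is reached through the OpenAI-compatible chat-completions interface, one helper can smoke-test all four connections. A minimal sketch (the `ping` helper and the model names in the comment are illustrative, not part of the commit):

```python
# Each client above is an OpenAI SDK instance pointed at a different base_url,
# so the same chat.completions call works against all four providers.
def ping(client, model):
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Reply with the single word: pong"}],
    )
    return response.choices[0].message.content

# e.g. ping(openai, "gpt-5-nano") or ping(gemini, "gemini-2.5-flash-lite")
```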
@@ -98,17 +130,110 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "# initialize\n",
-"# NOTE - option to use ultra-low cost models by uncommenting last 2 lines\n",
 "\n",
-"openai = OpenAI()\n",
-"claude = anthropic.Anthropic()\n",
-"OPENAI_MODEL = \"gpt-4o\"\n",
-"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n",
+"OPENAI_MODEL = \"gpt-5\"\n",
+"CLAUDE_MODEL = \"claude-sonnet-4-5-20250929\"\n",
+"GROK_MODEL = \"grok-4\"\n",
+"GEMINI_MODEL = \"gemini-2.5-pro\"\n",
 "\n",
 "# Want to keep costs ultra-low? Uncomment these lines:\n",
-"# OPENAI_MODEL = \"gpt-4o-mini\"\n",
-"# CLAUDE_MODEL = \"claude-3-haiku-20240307\""
+"\n",
+"# OPENAI_MODEL = \"gpt-5-nano\"\n",
+"# CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n",
+"# GROK_MODEL = \"grok-4-fast-non-reasoning\"\n",
+"# GEMINI_MODEL = \"gemini-2.5-flash-lite\""
 ]
 },
+{
+"cell_type": "markdown",
+"id": "7eab38a7",
+"metadata": {},
+"source": [
+"## PLEASE NOTE:\n",
+"\n",
+"We will be writing a solution to convert Python into efficient, optimized C++ code for your machine, which can be compiled to native machine code and executed.\n",
+"\n",
+"It is not necessary for you to execute the code yourself - that's not the point of the exercise!\n",
+"\n",
+"But if you would like to (because it's satisfying!) then I'm including the steps here. Very optional!\n",
+"\n",
+"As an alternative, I'll also show you a website where you can run the C++ code."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "8a2fbb68",
+"metadata": {},
+"outputs": [],
+"source": [
+"from system_info import retrieve_system_info\n",
+"\n",
+"system_info = retrieve_system_info()\n",
+"system_info"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "c6d29a5f",
+"metadata": {},
+"outputs": [],
+"source": [
+"message = f\"\"\"\n",
+"Here is a report of the system information for my computer.\n",
+"I want to run a C++ compiler to compile a single C++ file called main.cpp and then execute it in the simplest way possible.\n",
+"Please reply with whether I need to install any C++ compiler to do this. If so, please provide the simplest step by step instructions to do so.\n",
+"\n",
+"If I'm already set up to compile C++ code, then I'd like to run something like this in Python to compile and execute the code:\n",
+"```python\n",
+"compile_command = # something here - to achieve the fastest possible runtime performance\n",
+"compile_result = subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
+"run_command = # something here\n",
+"run_result = subprocess.run(run_command, check=True, text=True, capture_output=True)\n",
+"return run_result.stdout\n",
+"```\n",
+"Please tell me exactly what I should use for the compile_command and run_command.\n",
+"\n",
+"System information:\n",
+"{system_info}\n",
+"\"\"\"\n",
+"\n",
+"response = openai.chat.completions.create(model=OPENAI_MODEL, messages=[{\"role\": \"user\", \"content\": message}])\n",
+"display(Markdown(response.choices[0].message.content))\n",
+" "
+]
+},
+{
+"cell_type": "markdown",
+"id": "81e92c12",
+"metadata": {},
+"source": [
+"## If you need to install something\n",
+"\n",
"If you would like to, please follow GPTs instructions! Then rerun the analysis afterwards (you might need to Restart the notebook) to confirm you're set.\n",
+"\n",
+"You should now be equipped with the command to compile the code, and the command to run it!\n",
+"\n",
+"Enter that in the cell below:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "d734a634",
+"metadata": {},
+"outputs": [],
+"source": [
+"compile_command = [\"clang++\", \"-std=c++17\", \"-Ofast\", \"-mcpu=native\", \"-flto=thin\", \"-fvisibility=hidden\", \"-DNDEBUG\", \"main.cpp\", \"-o\", \"main\"]\n",
+"run_command = [\"./main\"]"
+]
+},
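Editor's note: for anyone not on a Mac with clang++, a rough g++ equivalent is sketched below. This is an illustration only (assuming g++ is on your PATH); the whole point of the cell above is that GPT tailors the exact flags to your machine, so prefer its answer:

```python
# Hypothetical equivalent for a Linux box, or Windows with MinGW (assumption):
# -O3 plus -march=native asks g++ for aggressive, host-tuned optimization.
compile_command = ["g++", "-std=c++17", "-O3", "-march=native", "-DNDEBUG", "main.cpp", "-o", "main"]
run_command = ["./main"]  # use ["main.exe"] on Windows
```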
+{
+"cell_type": "markdown",
+"id": "f0b0a437",
+"metadata": {},
+"source": [
+"## And now, on with the main task"
+]
+},
 {
@@ -118,9 +243,26 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
-"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
-"system_message += \"The C++ response needs to produce an identical output in the fastest possible time.\""
+"system_prompt = \"\"\"\n",
+"Your task is to convert Python code into high performance C++ code.\n",
+"Respond only with C++ code. Do not provide any explanation other than occasional comments.\n",
+"The C++ response needs to produce an identical output in the fastest possible time.\n",
+"\"\"\"\n",
+"\n",
+"def user_prompt_for(python):\n",
+" return f\"\"\"\n",
+"Port this Python code to C++ with the fastest possible implementation that produces identical output in the least time.\n",
+"The system information is:\n",
+"{system_info}\n",
+"Your response will be written to a file called main.cpp and then compiled and executed; the compilation command is:\n",
+"{compile_command}\n",
+"Respond only with C++ code.\n",
+"Python code to port:\n",
+"\n",
+"```python\n",
+"{python}\n",
+"```\n",
+"\"\"\""
 ]
 },
 {
@@ -130,12 +272,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"def user_prompt_for(python):\n",
-" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
-" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
-" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
-" user_prompt += python\n",
-" return user_prompt"
+"def messages_for(python):\n",
+" return [\n",
+" {\"role\": \"system\", \"content\": system_prompt},\n",
+" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
+" ]\n",
+" "
 ]
 },
 {
@@ -145,26 +287,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"def messages_for(python):\n",
-" return [\n",
-" {\"role\": \"system\", \"content\": system_message},\n",
-" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
-" ]"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
-"metadata": {},
-"outputs": [],
-"source": [
-"# write to a file called optimized.cpp\n",
-"\n",
 "def write_output(cpp):\n",
-" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
-" with open(\"optimized.cpp\", \"w\") as f:\n",
-" f.write(code)"
+" with open(\"main.cpp\", \"w\", encoding=\"utf-8\") as f:\n",
+" f.write(cpp)"
 ]
 },
 {
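Editor's note: the naive `replace("```cpp","").replace("```","")` used by `port` below will also strip backticks that happen to appear inside C++ string literals. A slightly sturdier fence stripper, as a sketch (the `strip_fences` helper is illustrative, not part of the commit):

```python
# Remove a leading/trailing markdown code fence without touching backticks
# that appear in the body of the code itself.
def strip_fences(reply):
    lines = reply.strip().splitlines()
    if lines and lines[0].startswith("```"):
        lines = lines[1:]
    if lines and lines[-1].startswith("```"):
        lines = lines[:-1]
    return "\n".join(lines) + "\n"
```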
@@ -174,35 +299,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"def optimize_gpt(python): \n",
-" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
-" reply = \"\"\n",
-" for chunk in stream:\n",
-" fragment = chunk.choices[0].delta.content or \"\"\n",
-" reply += fragment\n",
-" print(fragment, end='', flush=True)\n",
-" write_output(reply)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
-"metadata": {},
-"outputs": [],
-"source": [
-"def optimize_claude(python):\n",
-" result = claude.messages.stream(\n",
-" model=CLAUDE_MODEL,\n",
-" max_tokens=2000,\n",
-" system=system_message,\n",
-" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
-" )\n",
-" reply = \"\"\n",
-" with result as stream:\n",
-" for text in stream.text_stream:\n",
-" reply += text\n",
-" print(text, end=\"\", flush=True)\n",
+"def port(client, model, python):\n",
+" reasoning_effort = \"high\" if 'gpt' in model else None\n",
+" response = client.chat.completions.create(model=model, messages=messages_for(python), reasoning_effort=reasoning_effort)\n",
+" reply = response.choices[0].message.content\n",
+" reply = reply.replace('```cpp','').replace('```','')\n",
 " write_output(reply)"
 ]
 },
@@ -226,7 +327,7 @@
 " return result\n",
 "\n",
 "start_time = time.time()\n",
-"result = calculate(100_000_000, 4, 1) * 4\n",
+"result = calculate(200_000_000, 4, 1) * 4\n",
 "end_time = time.time()\n",
 "\n",
 "print(f\"Result: {result:.12f}\")\n",
@@ -241,7 +342,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"exec(pi)"
+"def run_python(code):\n",
+" globals = {\"__builtins__\": __builtins__}\n",
+" exec(code, globals)"
 ]
 },
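Editor's note: giving `exec` its own globals dict, as `run_python` does above, keeps the script's variables from leaking into the notebook namespace. If you also want the printed output back as a string (say, to show in a UI), a variant along these lines would work; a sketch, with `run_python_captured` an illustrative name:

```python
import io
from contextlib import redirect_stdout

# Run code in an isolated globals dict and capture everything it prints.
def run_python_captured(code):
    buffer = io.StringIO()
    with redirect_stdout(buffer):
        exec(code, {"__builtins__": __builtins__})
    return buffer.getvalue()
```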
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "7faa90da",
+"metadata": {},
+"outputs": [],
+"source": [
+"run_python(pi)"
+]
+},
 {
@@ -251,17 +364,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimize_gpt(pi)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
-"metadata": {},
-"outputs": [],
-"source": [
-"exec(pi)"
+"port(openai, OPENAI_MODEL, pi)"
 ]
 },
 {
@@ -271,18 +374,12 @@
 "source": [
 "# Compiling C++ and executing\n",
 "\n",
-"This next cell contains the command to compile a C++ file on my M1 Mac. \n",
-"It compiles the file `optimized.cpp` into an executable called `optimized` \n",
-"Then it runs the program called `optimized`\n",
 "\n",
-"In the next lab (day4), a student has contributed a full solution that compiles to efficient code on Mac, PC and Linux!\n",
 "\n",
-"You can wait for this, or you can google (or ask ChatGPT!) for how to do this on your platform, then replace the lines below.\n",
-"If you're not comfortable with this step, you can skip it for sure - I'll show you exactly how it performs on my Mac.\n",
+"This next cell contains the command to compile a C++ file based on the instructions from GPT.\n",
 "\n",
 "Again, it's not crucial to do this step if you don't wish to!\n",
 "\n",
 "OR alternatively: student Sandeep K.G. points out that you can run Python and C++ code online to test it out that way. Thank you Sandeep! \n",
-"> Not an exact comparison but you can still get the idea of performance difference.\n",
+"> Not an exact comparison but you can still get the idea of performance difference. \n",
 "> For example here: https://www.programiz.com/cpp-programming/online-compiler/"
 ]
 },
@@ -293,10 +390,41 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Compile C++ and run the executable\n",
+"# Use the commands from GPT 5\n",
 "\n",
-"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
-"!./optimized"
+"def compile_and_run():\n",
+" subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
+" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
+" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
+" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)"
 ]
 },
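Editor's note: `compile_and_run` deliberately runs the binary three times so you can see run-to-run variance. One wrinkle: with `check=True` and `capture_output=True`, a failed compile raises a bare `CalledProcessError` and the compiler's diagnostics stay hidden. A variant that surfaces them, as a sketch (the `compile_and_run_verbose` name is illustrative):

```python
# Same flow as compile_and_run, but print the compiler's stderr on failure
# instead of letting the CalledProcessError swallow it.
def compile_and_run_verbose():
    try:
        subprocess.run(compile_command, check=True, text=True, capture_output=True)
        print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)
    except subprocess.CalledProcessError as e:
        print(f"Command failed with exit code {e.returncode}:\n{e.stderr}")
```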
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "22f8f43a",
+"metadata": {},
+"outputs": [],
+"source": [
+"compile_and_run()"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "faaa39de",
+"metadata": {},
+"outputs": [],
+"source": [
+"19.178207/0.082168"
+]
+},
+{
+"cell_type": "markdown",
+"id": "4f3b8ef9",
+"metadata": {},
+"source": [
+"## OK let's try the other contenders!"
+]
+},
 {
@@ -306,294 +434,79 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimize_claude(pi)"
+"port(anthropic, CLAUDE_MODEL, pi)\n",
+"compile_and_run()"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
+"id": "138f63c8",
 "metadata": {},
 "outputs": [],
 "source": [
-"# Repeat for Claude - again, use the right approach for your platform\n",
+"port(grok, GROK_MODEL, pi)\n",
+"compile_and_run()"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "0a0243c5",
+"metadata": {},
+"outputs": [],
+"source": [
+"port(gemini, GEMINI_MODEL, pi)\n",
+"compile_and_run()\n"
+]
+},
+{
+"cell_type": "markdown",
+"id": "0689e200",
+"metadata": {},
+"source": [
+"\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "a6ffb0bb",
+"metadata": {},
+"outputs": [],
+"source": [
+"print(f\"\"\"\n",
+"In Ed's experiments, the performance speedups were:\n",
+"\n",
-"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
-"!./optimized"
+"4th place: Claude Sonnet 4.5: {19.178207/0.104241:.0f}X speedup\n",
+"3rd place: GPT-5: {19.178207/0.082168:.0f}X speedup\n",
+"2nd place: Grok 4: {19.178207/0.018092:.0f}X speedup\n",
+"1st place: Gemini 2.5 Pro: {19.178207/0.013314:.0f}X speedup\n",
+"\"\"\")"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
+"id": "8d58753b",
 "metadata": {},
 "outputs": [],
 "source": [
"python_hard = \"\"\"# Be careful to support large number sizes\n",
|
||||
"\n",
|
||||
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
|
||||
" value = seed\n",
|
||||
" while True:\n",
|
||||
" value = (a * value + c) % m\n",
|
||||
" yield value\n",
|
||||
" \n",
|
||||
"def max_subarray_sum(n, seed, min_val, max_val):\n",
|
||||
" lcg_gen = lcg(seed)\n",
|
||||
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
|
||||
" max_sum = float('-inf')\n",
|
||||
" for i in range(n):\n",
|
||||
" current_sum = 0\n",
|
||||
" for j in range(i, n):\n",
|
||||
" current_sum += random_numbers[j]\n",
|
||||
" if current_sum > max_sum:\n",
|
||||
" max_sum = current_sum\n",
|
||||
" return max_sum\n",
|
||||
"\n",
|
||||
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
|
||||
" total_sum = 0\n",
|
||||
" lcg_gen = lcg(initial_seed)\n",
|
||||
" for _ in range(20):\n",
|
||||
" seed = next(lcg_gen)\n",
|
||||
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
|
||||
" return total_sum\n",
|
||||
"\n",
|
||||
"# Parameters\n",
|
||||
"n = 10000 # Number of random numbers\n",
|
||||
"initial_seed = 42 # Initial seed for the LCG\n",
|
||||
"min_val = -10 # Minimum value of random numbers\n",
|
||||
"max_val = 10 # Maximum value of random numbers\n",
|
||||
"\n",
|
||||
"# Timing the function\n",
|
||||
"import time\n",
|
||||
"start_time = time.time()\n",
|
||||
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
|
||||
"end_time = time.time()\n",
|
||||
"\n",
|
||||
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
|
||||
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
|
||||
"\"\"\""
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
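Editor's aside on the workload itself: the inner double loop makes `max_subarray_sum` O(n²) per run, which is exactly why the C++ ports shine. If you ever want the same answer fast in pure Python, Kadane's algorithm returns the identical maximum subarray sum in O(n); a sketch (assuming a non-empty list):

```python
# Kadane's algorithm: at each element, either extend the best subarray ending
# at the previous position or start afresh; track the best seen overall.
def max_subarray_sum_kadane(numbers):
    best = current = numbers[0]
    for x in numbers[1:]:
        current = max(x, current + x)
        best = max(best, current)
    return best
```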
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
+"id": "7202e513",
 "metadata": {},
 "outputs": [],
 "source": [
 "exec(python_hard)"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
-"metadata": {},
-"outputs": [],
-"source": [
-"optimize_gpt(python_hard)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
-"metadata": {},
-"outputs": [],
-"source": [
-"# Replace this with the right C++ compile + execute command for your platform\n",
-"\n",
-"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
-"!./optimized"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
-"metadata": {},
-"outputs": [],
-"source": [
-"optimize_claude(python_hard)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
-"metadata": {},
-"outputs": [],
-"source": [
-"# Replace this with the right C++ compile + execute command for your platform\n",
-"\n",
-"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
-"!./optimized"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
-"metadata": {},
-"outputs": [],
-"source": [
-"def stream_gpt(python): \n",
-" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
-" reply = \"\"\n",
-" for chunk in stream:\n",
-" fragment = chunk.choices[0].delta.content or \"\"\n",
-" reply += fragment\n",
-" yield reply.replace('```cpp\\n','').replace('```','')"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "8669f56b-8314-4582-a167-78842caea131",
-"metadata": {},
-"outputs": [],
-"source": [
-"def stream_claude(python):\n",
-" result = claude.messages.stream(\n",
-" model=CLAUDE_MODEL,\n",
-" max_tokens=2000,\n",
-" system=system_message,\n",
-" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
-" )\n",
-" reply = \"\"\n",
-" with result as stream:\n",
-" for text in stream.text_stream:\n",
-" reply += text\n",
-" yield reply.replace('```cpp\\n','').replace('```','')"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
-"metadata": {},
-"outputs": [],
-"source": [
-"def optimize(python, model):\n",
-" if model==\"GPT\":\n",
-" result = stream_gpt(python)\n",
-" elif model==\"Claude\":\n",
-" result = stream_claude(python)\n",
-" else:\n",
-" raise ValueError(\"Unknown model\")\n",
-" for stream_so_far in result:\n",
-" yield stream_so_far "
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
-"metadata": {},
-"outputs": [],
-"source": [
-"with gr.Blocks() as ui:\n",
-" with gr.Row():\n",
-" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
-" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
-" with gr.Row():\n",
-" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
-" convert = gr.Button(\"Convert code\")\n",
-"\n",
-" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
-"\n",
-"ui.launch(inbrowser=True)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "19bf2bff-a822-4009-a539-f003b1651383",
-"metadata": {},
-"outputs": [],
-"source": [
-"def execute_python(code):\n",
-" try:\n",
-" output = io.StringIO()\n",
-" sys.stdout = output\n",
-" exec(code)\n",
-" finally:\n",
-" sys.stdout = sys.__stdout__\n",
-" return output.getvalue()"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
-"metadata": {},
-"outputs": [],
-"source": [
-"# You'll need to change the code in the try block to compile the C++ code for your platform\n",
-"# I pasted this into Claude's chat UI with a request for it to give me a version for an Intel PC,\n",
-"# and it responded with something that looks perfect - you can try a similar approach for your platform.\n",
-"\n",
-"# M1 Mac version to compile and execute optimized C++ code:\n",
-"\n",
-"def execute_cpp(code):\n",
-" write_output(code)\n",
-" try:\n",
-" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
-" compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
-" run_cmd = [\"./optimized\"]\n",
-" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
-" return run_result.stdout\n",
-" except subprocess.CalledProcessError as e:\n",
-" return f\"An error occurred:\\n{e.stderr}\""
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
-"metadata": {},
-"outputs": [],
-"source": [
-"css = \"\"\"\n",
-".python {background-color: #306998;}\n",
-".cpp {background-color: #050;}\n",
-"\"\"\""
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
 "id": "f1303932-160c-424b-97a8-d28c816721b2",
 "metadata": {},
 "outputs": [],
-"source": [
-"with gr.Blocks(css=css) as ui:\n",
-" gr.Markdown(\"## Convert code from Python to C++\")\n",
-" with gr.Row():\n",
-" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
-" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
-" with gr.Row():\n",
-" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
-" with gr.Row():\n",
-" convert = gr.Button(\"Convert code\")\n",
-" with gr.Row():\n",
-" python_run = gr.Button(\"Run Python\")\n",
-" cpp_run = gr.Button(\"Run C++\")\n",
-" with gr.Row():\n",
-" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
-" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
-"\n",
-" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
-" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
-" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
-"\n",
-"ui.launch(inbrowser=True)"
-]
+"source": []
 }
 ],
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -607,7 +520,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.11"
+"version": "3.12.9"
 }
 },
 "nbformat": 4,