Launching refreshed version of LLM Engineering weeks 1-4 - see README
655
week4/day3.ipynb
@@ -18,18 +18,15 @@
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" <img src=\"../assets/resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#f71;\">Reminder: fetch latest code</h2>\n",
" <span style=\"color:#f71;\">I'm continually improving these labs, adding more examples and exercises.\n",
" At the start of each week, it's worth checking you have the latest code.<br/>\n",
" First do a <a href=\"https://chatgpt.com/share/6734e705-3270-8012-a074-421661af6ba9\">git pull and merge your changes as needed</a>. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
" After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
" <code>conda env update --f environment.yml --prune</code><br/>\n",
" Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
" <code>pip install -r requirements.txt</code>\n",
" <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
" After you've pulled the code, from the llm_engineering directory, in a Cursor Terminal, run:<br/>\n",
" <code>uv sync</code><br/>\n",
" </span>\n",
" </td>\n",
" </tr>\n",
@@ -44,12 +41,12 @@
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" <img src=\"../assets/important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h1 style=\"color:#900;\">Important Note</h1>\n",
" <span style=\"color:#900;\">\n",
" In this lab, I use GPT-4o and Claude-3.5-Sonnet, which are the slightly higher priced models. The costs are still low, but if you'd prefer to keep costs ultra low, please make the suggested switches to the models (3 cells down from here).\n",
" In this lab, I use high end models GPT 5, Claude 4.5 Sonnet, Gemini 2.5 Pro, Grok 4, which are the slightly higher priced models. The costs are still low, but if you'd prefer to keep costs ultra low, please pick lower cost models like gpt-5-nano.\n",
" </span>\n",
" </td>\n",
" </tr>\n",
@@ -66,15 +63,10 @@
"# imports\n",
"\n",
"import os\n",
"import io\n",
"import sys\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess"
"import subprocess\n",
"from IPython.display import Markdown, display"
]
},
{
@@ -84,11 +76,51 @@
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv(override=True)\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')"
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"grok_api_key = os.getenv('GROK_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set (and this is optional)\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:2]}\")\n",
"else:\n",
" print(\"Google API Key not set (and this is optional)\")\n",
"\n",
"if grok_api_key:\n",
" print(f\"Grok API Key exists and begins {grok_api_key[:4]}\")\n",
"else:\n",
" print(\"Grok API Key not set (and this is optional)\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59863df1",
"metadata": {},
"outputs": [],
"source": [
"# Connect to client libraries\n",
"\n",
"openai = OpenAI()\n",
"\n",
"anthropic_url = \"https://api.anthropic.com/v1/\"\n",
"gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
"grok_url = \"https://api.x.ai/v1\"\n",
"\n",
"anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)\n",
"gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)\n",
"grok = OpenAI(api_key=grok_api_key, base_url=grok_url)"
]
},
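A quick aside on the pattern in the cell above: every one of these providers exposes an OpenAI-compatible endpoint, so the single `OpenAI` client class covers them all - only `api_key` and `base_url` change. A minimal sketch (not part of the commit; the model name is illustrative and assumes the Google key above is set):

```python
# Any OpenAI-compatible endpoint is called the same way - a sketch, not the lab code
from openai import OpenAI

gemini = OpenAI(api_key=google_api_key,
                base_url="https://generativelanguage.googleapis.com/v1beta/openai/")
response = gemini.chat.completions.create(
    model="gemini-2.5-flash-lite",  # illustrative low-cost choice
    messages=[{"role": "user", "content": "Say hello in one word"}],
)
print(response.choices[0].message.content)
```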
{
@@ -98,17 +130,110 @@
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"# NOTE - option to use ultra-low cost models by uncommenting last 2 lines\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n",
"OPENAI_MODEL = \"gpt-5\"\n",
"CLAUDE_MODEL = \"claude-sonnet-4-5-20250929\"\n",
"GROK_MODEL = \"grok-4\"\n",
"GEMINI_MODEL = \"gemini-2.5-pro\"\n",
"\n",
"# Want to keep costs ultra-low? Uncomment these lines:\n",
"# OPENAI_MODEL = \"gpt-4o-mini\"\n",
"# CLAUDE_MODEL = \"claude-3-haiku-20240307\""
"\n",
"# OPENAI_MODEL = \"gpt-5-nano\"\n",
"# CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n",
"# GROK_MODEL = \"grok-4-fast-non-reasoning\"\n",
"# GEMINI_MODEL = \"gemini-2.5-flash-lite\""
]
},
{
"cell_type": "markdown",
"id": "7eab38a7",
"metadata": {},
"source": [
"## PLEASE NOTE:\n",
"\n",
"We will be writing a solution to convert Python into efficient, optimized C++ code for your machine, which can be compiled to native machine code and executed.\n",
"\n",
"It is not necessary for you to execute the code yourself - that's not the point of the exercise!\n",
"\n",
"But if you would like to (because it's satisfying!) then I'm including the steps here. Very optional!\n",
"\n",
"As an alternative, I'll also show you a website where you can run the C++ code."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8a2fbb68",
"metadata": {},
"outputs": [],
"source": [
"from system_info import retrieve_system_info\n",
"\n",
"system_info = retrieve_system_info()\n",
"system_info"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6d29a5f",
"metadata": {},
"outputs": [],
"source": [
"message = f\"\"\"\n",
"Here is a report of the system information for my computer.\n",
"I want to run a C++ compiler to compile a single C++ file called main.cpp and then execute it in the simplest way possible.\n",
"Please reply with whether I need to install any C++ compiler to do this. If so, please provide the simplest step by step instructions to do so.\n",
"\n",
"If I'm already set up to compile C++ code, then I'd like to run something like this in Python to compile and execute the code:\n",
"```python\n",
"compile_command = # something here - to achieve the fastest possible runtime performance\n",
"compile_result = subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
"run_command = # something here\n",
"run_result = subprocess.run(run_command, check=True, text=True, capture_output=True)\n",
"return run_result.stdout\n",
"```\n",
"Please tell me exactly what I should use for the compile_command and run_command.\n",
"\n",
"System information:\n",
"{system_info}\n",
"\"\"\"\n",
"\n",
"response = openai.chat.completions.create(model=OPENAI_MODEL, messages=[{\"role\": \"user\", \"content\": message}])\n",
"display(Markdown(response.choices[0].message.content))\n",
" "
]
},
{
"cell_type": "markdown",
"id": "81e92c12",
"metadata": {},
"source": [
"## If you need to install something\n",
"\n",
"If you would like to, please follow GPTs instructions! Then rerun the analysis afterwards (you might need to Restart the notebook) to confirm you're set.\n",
|
||||
"\n",
|
||||
"You should now be equipped with the command to compile the code, and the command to run it!\n",
|
||||
"\n",
|
||||
"Enter that in the cell below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d734a634",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compile_command = [\"clang++\", \"-std=c++17\", \"-Ofast\", \"-mcpu=native\", \"-flto=thin\", \"-fvisibility=hidden\", \"-DNDEBUG\", \"main.cpp\", \"-o\", \"main\"]\n",
|
||||
"run_command = [\"./main\"]"
|
||||
]
|
||||
},
|
||||
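For readers less familiar with `subprocess`, here is a sketch (not part of the commit) of how these two command lists get consumed later: `check=True` raises `CalledProcessError` on a non-zero exit code, and `capture_output=True` with `text=True` collects stdout/stderr as strings rather than bytes. It assumes `clang++` is on your PATH and a `main.cpp` exists:

```python
# Sketch: compile main.cpp, then run the resulting binary and print its output
import subprocess

compile_command = ["clang++", "-std=c++17", "-O2", "main.cpp", "-o", "main"]
run_command = ["./main"]

subprocess.run(compile_command, check=True, text=True, capture_output=True)
result = subprocess.run(run_command, check=True, text=True, capture_output=True)
print(result.stdout)
```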
{
"cell_type": "markdown",
"id": "f0b0a437",
"metadata": {},
"source": [
"## And now, on with the main task"
]
},
{
@@ -118,9 +243,26 @@
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time.\""
"system_prompt = \"\"\"\n",
"Your task is to convert Python code into high performance C++ code.\n",
"Respond only with C++ code. Do not provide any explanation other than occasional comments.\n",
"The C++ response needs to produce an identical output in the fastest possible time.\n",
"\"\"\"\n",
"\n",
"def user_prompt_for(python):\n",
" return f\"\"\"\n",
"Port this Python code to C++ with the fastest possible implementation that produces identical output in the least time.\n",
"The system information is:\n",
"{system_info}\n",
"Your response will be written to a file called main.cpp and then compiled and executed; the compilation command is:\n",
"{compile_command}\n",
"Respond only with C++ code.\n",
"Python code to port:\n",
"\n",
"```python\n",
"{python}\n",
"```\n",
"\"\"\""
]
},
{
@@ -130,12 +272,12 @@
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
" user_prompt += python\n",
" return user_prompt"
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]\n",
" "
]
},
{
@@ -145,26 +287,9 @@
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
"metadata": {},
"outputs": [],
"source": [
"# write to a file called optimized.cpp\n",
"\n",
"def write_output(cpp):\n",
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
" with open(\"optimized.cpp\", \"w\") as f:\n",
" f.write(code)"
" with open(\"main.cpp\", \"w\", encoding=\"utf-8\") as f:\n",
" f.write(cpp)"
]
},
{
@@ -174,35 +299,11 @@
"metadata": {},
"outputs": [],
"source": [
"def optimize_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" print(fragment, end='', flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def optimize_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" print(text, end=\"\", flush=True)\n",
"def port(client, model, python):\n",
" reasoning_effort = \"high\" if 'gpt' in model else None\n",
" response = client.chat.completions.create(model=model, messages=messages_for(python), reasoning_effort=reasoning_effort)\n",
" reply = response.choices[0].message.content\n",
" reply = reply.replace('```cpp','').replace('```','')\n",
" write_output(reply)"
]
},
@@ -226,7 +327,7 @@
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"result = calculate(200_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
@@ -241,7 +342,19 @@
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
"def run_python(code):\n",
" globals = {\"__builtins__\": __builtins__}\n",
" exec(code, globals)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7faa90da",
"metadata": {},
"outputs": [],
"source": [
"run_python(pi)"
]
},
{
@@ -251,17 +364,7 @@
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
"port(openai, OPENAI_MODEL, pi)"
]
},
{
@@ -271,18 +374,12 @@
"source": [
"# Compiling C++ and executing\n",
"\n",
"This next cell contains the command to compile a C++ file on my M1 Mac. \n",
"It compiles the file `optimized.cpp` into an executable called `optimized` \n",
"Then it runs the program called `optimized`\n",
"\n",
"In the next lab (day4), a student has contributed a full solution that compiles to efficient code on Mac, PC and Linux!\n",
"\n",
"You can wait for this, or you can google (or ask ChatGPT!) for how to do this on your platform, then replace the lines below.\n",
"If you're not comfortable with this step, you can skip it for sure - I'll show you exactly how it performs on my Mac.\n",
"This next cell contains the command to compile a C++ file based on the instructions from GPT.\n",
"\n",
"Again, it's not crucial to do this step if you don't wish to!\n",
"\n",
"OR alternatively: student Sandeep K.G. points out that you can run Python and C++ code online to test it out that way. Thank you Sandeep! \n",
"> Not an exact comparison but you can still get the idea of performance difference.\n",
"> Not an exact comparison but you can still get the idea of performance difference. \n",
"> For example here: https://www.programiz.com/cpp-programming/online-compiler/"
]
},
@@ -293,10 +390,41 @@
"metadata": {},
"outputs": [],
"source": [
"# Compile C++ and run the executable\n",
"# Use the commands from GPT 5\n",
"\n",
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
"def compile_and_run():\n",
" subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22f8f43a",
"metadata": {},
"outputs": [],
"source": [
"compile_and_run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "faaa39de",
"metadata": {},
"outputs": [],
"source": [
"19.178207/0.082168"
]
},
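The bare expression in the cell above is the headline number: Python's runtime divided by the C++ runtime. A labeled version of the same arithmetic (the values are the author's measurements from the cells above, on his machine):

```python
# Speedup ratio: Python runtime / C++ runtime, from Ed's runs
python_seconds = 19.178207   # the pure-Python pi calculation
cpp_seconds = 0.082168       # GPT-5's C++ port, compiled with clang++
print(f"{python_seconds / cpp_seconds:.0f}X speedup")  # ~233X
```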
{
"cell_type": "markdown",
"id": "4f3b8ef9",
"metadata": {},
"source": [
"## OK let's try the other contenders!"
]
},
{
@@ -306,294 +434,79 @@
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(pi)"
"port(anthropic, CLAUDE_MODEL, pi)\n",
"compile_and_run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
"id": "138f63c8",
"metadata": {},
"outputs": [],
"source": [
"# Repeat for Claude - again, use the right approach for your platform\n",
"port(grok, GROK_MODEL, pi)\n",
"compile_and_run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a0243c5",
"metadata": {},
"outputs": [],
"source": [
"port(gemini, GEMINI_MODEL, pi)\n",
"compile_and_run()\n"
]
},
{
"cell_type": "markdown",
"id": "0689e200",
"metadata": {},
"source": [
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6ffb0bb",
"metadata": {},
"outputs": [],
"source": [
"print(f\"\"\"\n",
"In Ed's experiments, the performance speedups were:\n",
"\n",
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
"4th place: Claude Sonnet 4.5: {19.178207/0.104241:.0f}X speedup\n",
"3rd place: GPT-5: {19.178207/0.082168:.0f}X speedup\n",
"2nd place: Grok 4: {19.178207/0.018092:.0f}X speedup\n",
"1st place: Gemini 2.5 Pro: {19.178207/0.013314:.0f}X speedup\n",
"\"\"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
"id": "8d58753b",
"metadata": {},
"outputs": [],
"source": [
"python_hard = \"\"\"# Be careful to support large number sizes\n",
"\n",
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
" value = seed\n",
" while True:\n",
" value = (a * value + c) % m\n",
" yield value\n",
" \n",
"def max_subarray_sum(n, seed, min_val, max_val):\n",
" lcg_gen = lcg(seed)\n",
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
" max_sum = float('-inf')\n",
" for i in range(n):\n",
" current_sum = 0\n",
" for j in range(i, n):\n",
" current_sum += random_numbers[j]\n",
" if current_sum > max_sum:\n",
" max_sum = current_sum\n",
" return max_sum\n",
"\n",
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
" total_sum = 0\n",
" lcg_gen = lcg(initial_seed)\n",
" for _ in range(20):\n",
" seed = next(lcg_gen)\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
" return total_sum\n",
"\n",
"# Parameters\n",
"n = 10000 # Number of random numbers\n",
"initial_seed = 42 # Initial seed for the LCG\n",
"min_val = -10 # Minimum value of random numbers\n",
"max_val = 10 # Maximum value of random numbers\n",
"\n",
"# Timing the function\n",
"import time\n",
"start_time = time.time()\n",
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
"end_time = time.time()\n",
"\n",
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
"\"\"\""
"\n"
]
},
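A quick check (not in the commit) of the LCG recurrence that `python_hard` relies on: each value is `(a * value + c) % m` with a=1664525, c=1013904223, m=2**32, so the whole stream is determined by the seed - which is why a faithful C++ port must reproduce the generator exactly for the outputs to match:

```python
# First LCG output from seed 42, worked by hand: (1664525*42 + 1013904223) % 2**32
a, c, m = 1664525, 1013904223, 2**32
print((a * 42 + c) % m)  # 1083814273
```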
{
"cell_type": "code",
"execution_count": null,
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
"id": "7202e513",
"metadata": {},
"outputs": [],
"source": [
"exec(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
"metadata": {},
"outputs": [],
"source": [
"# Replace this with the right C++ compile + execute command for your platform\n",
"\n",
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
"metadata": {},
"outputs": [],
"source": [
"# Replace this with the right C++ compile + execute command for your platform\n",
"\n",
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8669f56b-8314-4582-a167-78842caea131",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" convert = gr.Button(\"Convert code\")\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "19bf2bff-a822-4009-a539-f003b1651383",
"metadata": {},
"outputs": [],
"source": [
"def execute_python(code):\n",
" try:\n",
" output = io.StringIO()\n",
" sys.stdout = output\n",
" exec(code)\n",
" finally:\n",
" sys.stdout = sys.__stdout__\n",
" return output.getvalue()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
"metadata": {},
"outputs": [],
"source": [
"# You'll need to change the code in the try block to compile the C++ code for your platform\n",
"# I pasted this into Claude's chat UI with a request for it to give me a version for an Intel PC,\n",
"# and it responded with something that looks perfect - you can try a similar approach for your platform.\n",
"\n",
"# M1 Mac version to compile and execute optimized C++ code:\n",
"\n",
"def execute_cpp(code):\n",
" write_output(code)\n",
" try:\n",
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
" compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
" run_cmd = [\"./optimized\"]\n",
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
" return run_result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" return f\"An error occurred:\\n{e.stderr}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".python {background-color: #306998;}\n",
".cpp {background-color: #050;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1303932-160c-424b-97a8-d28c816721b2",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -607,7 +520,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
"version": "3.12.9"
}
},
"nbformat": 4,

865
week4/day4.ipynb
@@ -7,30 +7,41 @@
"source": [
"# Code Generator\n",
"\n",
"The requirement: use an Open Source model to generate high performance C++ code from Python code\n",
"\n",
"To replicate this, you'll need to set up a HuggingFace endpoint as I do in the video. It's simple to do, and it's quite satisfying to see the results!\n",
"\n",
"It's also an important part of your learning; this is the first example of deploying an open source model to be behind an API. We'll return to this in Week 8, but this should plant a seed in your mind for what's involved in moving open source models into production."
"The requirement: use a Frontier model to generate high performance C++ code from Python code\n"
]
},
{
"cell_type": "markdown",
"id": "22e1567b-33fd-49e7-866e-4b635d15715a",
"id": "d5ccb926-7b49-44a4-99ab-8ef20b5778c0",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" <img src=\"../assets/resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h1 style=\"color:#900;\">Important - Pause Endpoints when not in use</h1>\n",
" <h2 style=\"color:#f71;\">Reminder: OPTIONAL to execute C++ code</h2>\n",
" <span style=\"color:#f71;\">As an alternative, you can run it on the website given yesterday</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "d90e04a2-5b8a-4fd5-9db8-27c02f033313",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../assets/important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h1 style=\"color:#900;\">Important Note</h1>\n",
" <span style=\"color:#900;\">\n",
" If you do decide to use HuggingFace endpoints for this project, you should stop or pause the endpoints when you are done to avoid accruing unnecessary running cost. The costs are very low as long as you only run the endpoint when you're using it. Navigate to the HuggingFace endpoint UI <a href=\"https://ui.endpoints.huggingface.co/\">here,</a> open your endpoint, and click Pause to put it on pause so you no longer pay for it. \n",
"Many thanks to student John L. for raising this.\n",
"<br/><br/>\n",
"In week 8 we will use Modal instead of HuggingFace endpoints; with Modal you only pay for the time that you use it and you should get free credits.\n",
" In this lab, I use free open source models on Ollama. I also use paid open-source models via Groq and OpenRouter. Only pick the models you want to!\n",
" </span>\n",
" </td>\n",
" </tr>\n",
@@ -49,15 +60,10 @@
"import os\n",
"import io\n",
"import sys\n",
"import json\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess"
"import subprocess\n"
]
},
{
@@ -67,12 +73,70 @@
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv(override=True)\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')"
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"grok_api_key = os.getenv('GROK_API_KEY')\n",
"groq_api_key = os.getenv('GROQ_API_KEY')\n",
"openrouter_api_key = os.getenv('OPENROUTER_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set (and this is optional)\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:2]}\")\n",
"else:\n",
" print(\"Google API Key not set (and this is optional)\")\n",
"\n",
"if grok_api_key:\n",
" print(f\"Grok API Key exists and begins {grok_api_key[:4]}\")\n",
"else:\n",
" print(\"Grok API Key not set (and this is optional)\")\n",
"\n",
"if groq_api_key:\n",
" print(f\"Groq API Key exists and begins {groq_api_key[:4]}\")\n",
"else:\n",
" print(\"Groq API Key not set (and this is optional)\")\n",
"\n",
"if openrouter_api_key:\n",
" print(f\"OpenRouter API Key exists and begins {openrouter_api_key[:6]}\")\n",
"else:\n",
" print(\"OpenRouter API Key not set (and this is optional)\")\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59863df1",
"metadata": {},
"outputs": [],
"source": [
"# Connect to client libraries\n",
"\n",
"openai = OpenAI()\n",
"\n",
"anthropic_url = \"https://api.anthropic.com/v1/\"\n",
"gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
"grok_url = \"https://api.x.ai/v1\"\n",
"groq_url = \"https://api.groq.com/openai/v1\"\n",
"ollama_url = \"http://localhost:11434/v1\"\n",
"openrouter_url = \"https://openrouter.ai/api/v1\"\n",
"\n",
"anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)\n",
"gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)\n",
"grok = OpenAI(api_key=grok_api_key, base_url=grok_url)\n",
"groq = OpenAI(api_key=groq_api_key, base_url=groq_url)\n",
"ollama = OpenAI(api_key=\"ollama\", base_url=ollama_url)\n",
"openrouter = OpenAI(api_key=openrouter_api_key, base_url=openrouter_url)\n"
]
},
{
@@ -82,12 +146,55 @@
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"models = [\"gpt-5\", \"claude-sonnet-4-5-20250929\", \"grok-4\", \"gemini-2.5-pro\", \"qwen2.5-coder\", \"deepseek-coder-v2\", \"gpt-oss:20b\", \"qwen/qwen3-coder-30b-a3b-instruct\", \"openai/gpt-oss-120b\", ]\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\""
"clients = {\"gpt-5\": openai, \"claude-sonnet-4-5-20250929\": anthropic, \"grok-4\": grok, \"gemini-2.5-pro\": gemini, \"openai/gpt-oss-120b\": groq, \"qwen2.5-coder\": ollama, \"deepseek-coder-v2\": ollama, \"gpt-oss:20b\": ollama, \"qwen/qwen3-coder-30b-a3b-instruct\": openrouter}\n",
"\n",
"# Want to keep costs ultra-low? Replace this with models of your choice, using the examples from yesterday"
]
},
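The `clients` dict above is the whole routing layer: model name in, the client that serves it out. A sketch of the dispatch (not in the commit; the model choice is illustrative and assumes the local Ollama server is running):

```python
# Look up the right client for a model, then call it with the usual API shape
model = "qwen2.5-coder"   # served locally by Ollama in this setup
client = clients[model]
response = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
)
print(response.choices[0].message.content)
```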
{
"cell_type": "code",
"execution_count": null,
"id": "68c1f1be",
"metadata": {},
"outputs": [],
"source": [
"from system_info import retrieve_system_info\n",
"\n",
"system_info = retrieve_system_info()\n",
"system_info"
]
},
{
"cell_type": "markdown",
"id": "81e92c12",
"metadata": {},
"source": [
"## Overwrite this with the commands from yesterday\n",
"\n",
"Or just use the website like yesterday:\n",
"\n",
" https://www.programiz.com/cpp-programming/online-compiler/"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d734a634",
"metadata": {},
"outputs": [],
"source": [
"compile_command = [\"clang++\", \"-std=c++17\", \"-Ofast\", \"-mcpu=native\", \"-flto=thin\", \"-fvisibility=hidden\", \"-DNDEBUG\", \"main.cpp\", \"-o\", \"main\"]\n",
"run_command = [\"./main\"]\n"
]
},
{
"cell_type": "markdown",
"id": "f0b0a437",
"metadata": {},
"source": [
"## And now, on with the main task"
]
},
{
@@ -97,9 +204,26 @@
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\""
"system_prompt = \"\"\"\n",
"Your task is to convert Python code into high performance C++ code.\n",
"Respond only with C++ code. Do not provide any explanation other than occasional comments.\n",
"The C++ response needs to produce an identical output in the fastest possible time.\n",
"\"\"\"\n",
"\n",
"def user_prompt_for(python):\n",
" return f\"\"\"\n",
"Port this Python code to C++ with the fastest possible implementation that produces identical output in the least time.\n",
"The system information is:\n",
"{system_info}\n",
"Your response will be written to a file called main.cpp and then compiled and executed; the compilation command is:\n",
"{compile_command}\n",
"Respond only with C++ code.\n",
"Python code to port:\n",
"\n",
"```python\n",
"{python}\n",
"```\n",
"\"\"\""
]
},
{
@@ -109,12 +233,12 @@
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
" user_prompt += python\n",
" return user_prompt"
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]\n",
" "
]
},
{
@@ -124,26 +248,9 @@
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
"metadata": {},
"outputs": [],
"source": [
"# write to a file called optimized.cpp\n",
"\n",
"def write_output(cpp):\n",
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
" with open(\"optimized.cpp\", \"w\") as f:\n",
" f.write(code)"
" with open(\"main.cpp\", \"w\") as f:\n",
" f.write(cpp)"
]
},
{
@@ -153,36 +260,14 @@
"metadata": {},
"outputs": [],
"source": [
"def optimize_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" print(fragment, end='', flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def optimize_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" print(text, end=\"\", flush=True)\n",
" write_output(reply)"
"def port(model, python):\n",
" client = clients[model]\n",
" reasoning_effort = \"high\" if 'gpt' in model else None\n",
" response = client.chat.completions.create(model=model, messages=messages_for(python), reasoning_effort=reasoning_effort)\n",
" reply = response.choices[0].message.content\n",
" reply = reply.replace('```cpp','').replace('```','')\n",
" write_output(reply)\n",
" return reply"
]
},
{
@@ -205,7 +290,7 @@
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"result = calculate(200_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
@@ -220,27 +305,22 @@
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "105db6f9-343c-491d-8e44-3a5328b81719",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
"def run_python(code):\n",
" globals_dict = {\"__builtins__\": __builtins__}\n",
"\n",
" buffer = io.StringIO()\n",
" old_stdout = sys.stdout\n",
" sys.stdout = buffer\n",
"\n",
" try:\n",
" exec(code, globals_dict)\n",
" output = buffer.getvalue()\n",
" except Exception as e:\n",
" output = f\"Error: {e}\"\n",
" finally:\n",
" sys.stdout = old_stdout\n",
"\n",
" return output"
]
},
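The `run_python` above captures anything the executed code prints by swapping `sys.stdout` for an in-memory buffer, then restoring it in `finally`. As an aside (not in the commit), the same capture can be written with the stdlib context manager `contextlib.redirect_stdout`, which restores stdout automatically even if `exec()` raises:

```python
# Equivalent sketch using contextlib.redirect_stdout
import io
from contextlib import redirect_stdout

def run_python_alt(code):
    buffer = io.StringIO()
    try:
        with redirect_stdout(buffer):
            exec(code, {"__builtins__": __builtins__})
    except Exception as e:
        return f"Error: {e}"
    return buffer.getvalue()
```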
{
@@ -250,188 +330,14 @@
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
"metadata": {},
"outputs": [],
"source": [
"python_hard = \"\"\"# Be careful to support large number sizes\n",
"\n",
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
" value = seed\n",
" while True:\n",
" value = (a * value + c) % m\n",
" yield value\n",
" \n",
"def max_subarray_sum(n, seed, min_val, max_val):\n",
" lcg_gen = lcg(seed)\n",
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
" max_sum = float('-inf')\n",
" for i in range(n):\n",
" current_sum = 0\n",
" for j in range(i, n):\n",
" current_sum += random_numbers[j]\n",
" if current_sum > max_sum:\n",
" max_sum = current_sum\n",
" return max_sum\n",
"\n",
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
" total_sum = 0\n",
" lcg_gen = lcg(initial_seed)\n",
" for _ in range(20):\n",
" seed = next(lcg_gen)\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
" return total_sum\n",
"\n",
"# Parameters\n",
"n = 10000 # Number of random numbers\n",
"initial_seed = 42 # Initial seed for the LCG\n",
"min_val = -10 # Minimum value of random numbers\n",
"max_val = 10 # Maximum value of random numbers\n",
"\n",
"# Timing the function\n",
"import time\n",
"start_time = time.time()\n",
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
"end_time = time.time()\n",
"\n",
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
"metadata": {},
"outputs": [],
"source": [
"exec(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8669f56b-8314-4582-a167-78842caea131",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
"def compile_and_run():\n",
" try:\n",
" subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
" print(subprocess.run(run_command, check=True, text=True, capture_output=True).stdout)\n",
" except subprocess.CalledProcessError as e:\n",
" print(f\"An error occurred:\\n{e.stderr}\")"
]
},
{
@@ -443,13 +349,13 @@
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" python = gr.Textbox(label=\"Python code:\", lines=28, value=pi)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=28)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" model = gr.Dropdown(models, label=\"Select model\", value=models[0])\n",
" convert = gr.Button(\"Convert code\")\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" convert.click(port, inputs=[model, python], outputs=[cpp])\n",
"\n",
"ui.launch(inbrowser=True)"
]
@@ -457,376 +363,63 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19bf2bff-a822-4009-a539-f003b1651383",
|
||||
"id": "28969928",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def execute_python(code):\n",
|
||||
" try:\n",
|
||||
" output = io.StringIO()\n",
|
||||
" sys.stdout = output\n",
|
||||
" exec(code)\n",
|
||||
" finally:\n",
|
||||
" sys.stdout = sys.__stdout__\n",
|
||||
" return output.getvalue()"
|
||||
]
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
|
||||
"id": "d9cc1c03",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def execute_cpp(code):\n",
|
||||
" write_output(code)\n",
|
||||
" compiler_cmd = [\"clang++\", \"-O3\", \"-std=c++17\", \"-march=armv8.3-a\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
|
||||
" try:\n",
|
||||
" compile_result = subprocess.run(compiler_cmd, check=True, text=True, capture_output=True)\n",
|
||||
" run_cmd = [\"./optimized\"]\n",
|
||||
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
|
||||
" return run_result.stdout\n",
|
||||
" except subprocess.CalledProcessError as e:\n",
|
||||
" return f\"An error occurred:\\n{e.stderr}\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"css = \"\"\"\n",
|
||||
".python {background-color: #306998;}\n",
|
||||
".cpp {background-color: #050;}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f1303932-160c-424b-97a8-d28c816721b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with gr.Blocks(css=css) as ui:\n",
|
||||
" gr.Markdown(\"## Convert code from Python to C++\")\n",
|
||||
" with gr.Row():\n",
|
||||
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
|
||||
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
|
||||
" with gr.Row():\n",
|
||||
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
|
||||
" with gr.Row():\n",
|
||||
" convert = gr.Button(\"Convert code\")\n",
|
||||
" with gr.Row():\n",
|
||||
" python_run = gr.Button(\"Run Python\")\n",
|
||||
" cpp_run = gr.Button(\"Run C++\")\n",
|
||||
" with gr.Row():\n",
|
||||
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
|
||||
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
|
||||
"\n",
|
||||
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
|
||||
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
|
||||
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
|
||||
"\n",
|
||||
"ui.launch(inbrowser=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb8c5b4e-ec51-4f21-b3f8-6aa94fede86d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from huggingface_hub import login, InferenceClient\n",
|
||||
"from transformers import AutoTokenizer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "13347633-4606-4e38-9927-80c39e65c1f1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hf_token = os.environ['HF_TOKEN']\n",
|
||||
"login(hf_token, add_to_git_credential=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ef60a4df-6267-4ebd-8eed-dcb917af0a5e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n",
|
||||
"code_gemma = \"google/codegemma-7b-it\"\n",
|
||||
"CODE_QWEN_URL = \"https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud\"\n",
|
||||
"CODE_GEMMA_URL = \"https://c5hggiyqachmgnqg.us-east-1.aws.endpoints.huggingface.cloud\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "695ce389-a903-4533-a2f1-cd9e2a6af8f2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
|
||||
"messages = messages_for(pi)\n",
|
||||
"text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d4548e96-0b32-4793-bdd6-1b072c2f26ab",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb2a126b-09e7-4966-bc97-0ef5c2cc7896",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
|
||||
"stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
|
||||
"for r in stream:\n",
|
||||
" print(r.token.text, end = \"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "127a52e5-ad85-42b7-a0f5-9afda5efe090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def stream_code_qwen(python):\n",
|
||||
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
|
||||
" messages = messages_for(python)\n",
|
||||
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
|
||||
" client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
|
||||
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
|
||||
" result = \"\"\n",
|
||||
" for r in stream:\n",
|
||||
" result += r.token.text\n",
|
||||
" yield result "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a82387d1-7651-4923-995b-fe18356fcaa6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def optimize(python, model):\n",
|
||||
" if model==\"GPT\":\n",
|
||||
" result = stream_gpt(python)\n",
|
||||
" elif model==\"Claude\":\n",
|
||||
" result = stream_claude(python)\n",
|
||||
" elif model==\"CodeQwen\":\n",
|
||||
" result = stream_code_qwen(python)\n",
|
||||
" else:\n",
|
||||
" raise ValueError(\"Unknown model\")\n",
|
||||
" for stream_so_far in result:\n",
|
||||
" yield stream_so_far "
|
||||
"compile_and_run()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b0a6a97-5b8a-4a9b-8ee0-7561e0ced673",
|
||||
"id": "80037156",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../thankyou.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#090;\">Thank you to @CloudLlama for an amazing contribution</h2>\n",
|
||||
" <span style=\"color:#090;\">\n",
|
||||
" A student has contributed a chunk of code to improve this, in the next 2 cells. You can now select which Python porgram to run,\n",
|
||||
" and a compiler is automatically selected that will work on PC, Windows and Mac. Massive thank you @CloudLlama!\n",
|
||||
" </span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
"Qwen 2.5 Coder: Fail \n",
|
||||
"DeepSeek Coder v2: 0.114050084 \n",
|
||||
"OpenAI gpt-oss 20B: 0.080438 \n",
|
||||
"Qwen 30B: 0.113734 \n",
|
||||
"OpenAI gpt-oss 120B: 1.407383\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4ba311ec-c16a-4fe0-946b-4b940704cf65",
|
||||
"cell_type": "markdown",
|
||||
"id": "ad8d4e52",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def select_sample_program(sample_program):\n",
|
||||
" if sample_program==\"pi\":\n",
|
||||
" return pi\n",
|
||||
" elif sample_program==\"python_hard\":\n",
|
||||
" return python_hard\n",
|
||||
" else:\n",
|
||||
" return \"Type your Python program here\""
|
||||
"In Ed's experiments, the performance speedups were:\n",
|
||||
"\n",
|
||||
"9th place: Qwen 2.5 Coder: Fail \n",
|
||||
"8th place: OpenAI GPT-OSS 120B: 14X speedup \n",
|
||||
"7th place: DeepSeek Coder v2: 168X speedup \n",
|
||||
"6th place: Qwen3 Coder 30B: 168X speedup \n",
|
||||
"5th place: Claude Sonnet 4.5: 184X speedup \n",
|
||||
"4th place: GPT-5: 233X speedup \n",
|
||||
"**3rd place: oss-20B: 238X speedup** \n",
|
||||
"2nd place: Grok 4: 1060X speedup \n",
|
||||
"1st place: Gemini 2.5 Pro: 1440X speedup "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e42286bc-085c-45dc-b101-234308e58269",
|
||||
"cell_type": "markdown",
|
||||
"id": "6e617df9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import platform\n",
|
||||
"\n",
|
||||
"VISUAL_STUDIO_2022_TOOLS = \"C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\Tools\\\\VsDevCmd.bat\"\n",
|
||||
"VISUAL_STUDIO_2019_TOOLS = \"C:\\\\Program Files (x86)\\\\Microsoft Visual Studio\\\\2019\\\\BuildTools\\\\Common7\\\\Tools\\\\VsDevCmd.bat\"\n",
|
||||
"\n",
|
||||
"simple_cpp = \"\"\"\n",
|
||||
"#include <iostream>\n",
|
||||
"\n",
|
||||
"int main() {\n",
|
||||
" std::cout << \"Hello\";\n",
|
||||
" return 0;\n",
|
||||
"}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"def run_cmd(command_to_run):\n",
|
||||
" try:\n",
|
||||
" run_result = subprocess.run(command_to_run, check=True, text=True, capture_output=True)\n",
|
||||
" return run_result.stdout if run_result.stdout else \"SUCCESS\"\n",
|
||||
" except:\n",
|
||||
" return \"\"\n",
|
||||
"\n",
|
||||
"def c_compiler_cmd(filename_base):\n",
|
||||
" my_platform = platform.system()\n",
|
||||
" my_compiler = []\n",
|
||||
"\n",
|
||||
" try:\n",
|
||||
" with open(\"simple.cpp\", \"w\") as f:\n",
|
||||
" f.write(simple_cpp)\n",
|
||||
" \n",
|
||||
" if my_platform == \"Windows\":\n",
|
||||
" if os.path.isfile(VISUAL_STUDIO_2022_TOOLS):\n",
|
||||
" if os.path.isfile(\"./simple.exe\"):\n",
|
||||
" os.remove(\"./simple.exe\")\n",
|
||||
" compile_cmd = [\"cmd\", \"/c\", VISUAL_STUDIO_2022_TOOLS, \"&\", \"cl\", \"simple.cpp\"]\n",
|
||||
" if run_cmd(compile_cmd):\n",
|
||||
" if run_cmd([\"./simple.exe\"]) == \"Hello\":\n",
|
||||
" my_compiler = [\"Windows\", \"Visual Studio 2022\", [\"cmd\", \"/c\", VISUAL_STUDIO_2022_TOOLS, \"&\", \"cl\", f\"{filename_base}.cpp\"]]\n",
|
||||
" \n",
|
||||
" if not my_compiler:\n",
|
||||
" if os.path.isfile(VISUAL_STUDIO_2019_TOOLS):\n",
|
||||
" if os.path.isfile(\"./simple.exe\"):\n",
|
||||
" os.remove(\"./simple.exe\")\n",
|
||||
" compile_cmd = [\"cmd\", \"/c\", VISUAL_STUDIO_2019_TOOLS, \"&\", \"cl\", \"simple.cpp\"]\n",
|
||||
" if run_cmd(compile_cmd):\n",
|
||||
" if run_cmd([\"./simple.exe\"]) == \"Hello\":\n",
|
||||
" my_compiler = [\"Windows\", \"Visual Studio 2019\", [\"cmd\", \"/c\", VISUAL_STUDIO_2019_TOOLS, \"&\", \"cl\", f\"{filename_base}.cpp\"]]\n",
|
||||
" \n",
|
||||
" if not my_compiler:\n",
|
||||
" my_compiler=[my_platform, \"Unavailable\", []]\n",
|
||||
" \n",
|
||||
" elif my_platform == \"Linux\":\n",
|
||||
" if os.path.isfile(\"./simple\"):\n",
|
||||
" os.remove(\"./simple\")\n",
|
||||
" compile_cmd = [\"g++\", \"simple.cpp\", \"-o\", \"simple\"]\n",
|
||||
" if run_cmd(compile_cmd):\n",
|
||||
" if run_cmd([\"./simple\"]) == \"Hello\":\n",
|
||||
" my_compiler = [\"Linux\", \"GCC (g++)\", [\"g++\", f\"{filename_base}.cpp\", \"-o\", f\"{filename_base}\" ]]\n",
|
||||
" \n",
|
||||
" if not my_compiler:\n",
|
||||
" if os.path.isfile(\"./simple\"):\n",
|
||||
" os.remove(\"./simple\")\n",
|
||||
" compile_cmd = [\"clang++\", \"simple.cpp\", \"-o\", \"simple\"]\n",
|
||||
" if run_cmd(compile_cmd):\n",
|
||||
" if run_cmd([\"./simple\"]) == \"Hello\":\n",
|
||||
" my_compiler = [\"Linux\", \"Clang++\", [\"clang++\", f\"{filename_base}.cpp\", \"-o\", f\"{filename_base}\"]]\n",
|
||||
" \n",
|
||||
" if not my_compiler:\n",
|
||||
" my_compiler=[my_platform, \"Unavailable\", []]\n",
|
||||
" \n",
|
||||
" elif my_platform == \"Darwin\":\n",
|
||||
" if os.path.isfile(\"./simple\"):\n",
|
||||
" os.remove(\"./simple\")\n",
|
||||
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"simple\", \"simple.cpp\"]\n",
|
||||
" if run_cmd(compile_cmd):\n",
|
||||
" if run_cmd([\"./simple\"]) == \"Hello\":\n",
|
||||
" my_compiler = [\"Macintosh\", \"Clang++\", [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", f\"{filename_base}\", f\"{filename_base}.cpp\"]]\n",
|
||||
" \n",
|
||||
" if not my_compiler:\n",
|
||||
" my_compiler=[my_platform, \"Unavailable\", []]\n",
|
||||
" except:\n",
|
||||
" my_compiler=[my_platform, \"Unavailable\", []]\n",
|
||||
" \n",
|
||||
" if my_compiler:\n",
|
||||
" return my_compiler\n",
|
||||
" else:\n",
|
||||
" return [\"Unknown\", \"Unavailable\", []]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compiler_cmd = c_compiler_cmd(\"optimized\")\n",
|
||||
"\n",
|
||||
"with gr.Blocks(css=css) as ui:\n",
|
||||
" gr.Markdown(\"## Convert code from Python to C++\")\n",
|
||||
" with gr.Row():\n",
|
||||
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
|
||||
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
|
||||
" with gr.Row():\n",
|
||||
" with gr.Column():\n",
|
||||
" sample_program = gr.Radio([\"pi\", \"python_hard\"], label=\"Sample program\", value=\"python_hard\")\n",
|
||||
" model = gr.Dropdown([\"GPT\", \"Claude\", \"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n",
|
||||
" with gr.Column():\n",
|
||||
" architecture = gr.Radio([compiler_cmd[0]], label=\"Architecture\", interactive=False, value=compiler_cmd[0])\n",
|
||||
" compiler = gr.Radio([compiler_cmd[1]], label=\"Compiler\", interactive=False, value=compiler_cmd[1])\n",
|
||||
" with gr.Row():\n",
|
||||
" convert = gr.Button(\"Convert code\")\n",
|
||||
" with gr.Row():\n",
|
||||
" python_run = gr.Button(\"Run Python\")\n",
|
||||
" if not compiler_cmd[1] == \"Unavailable\":\n",
|
||||
" cpp_run = gr.Button(\"Run C++\")\n",
|
||||
" else:\n",
|
||||
" cpp_run = gr.Button(\"No compiler to run C++\", interactive=False)\n",
|
||||
" with gr.Row():\n",
|
||||
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
|
||||
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
|
||||
"\n",
|
||||
" sample_program.change(select_sample_program, inputs=[sample_program], outputs=[python])\n",
|
||||
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
|
||||
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
|
||||
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
|
||||
"\n",
|
||||
"ui.launch(inbrowser=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9d0ad093-425b-488e-8c3f-67f729dd9c06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -840,7 +433,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
522
week4/day5.ipynb
Normal file
522
week4/day5.ipynb
Normal file
@@ -0,0 +1,522 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Code Generator\n",
|
||||
"\n",
|
||||
"The requirement: use a Frontier model to generate high performance C++ code from Python code\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d5ccb926-7b49-44a4-99ab-8ef20b5778c0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../assets/resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#f71;\">Reminder: OPTIONAL to execute C++ code or Rust code</h2>\n",
|
||||
" <span style=\"color:#f71;\">As an alternative, you can run it on the website given yesterday</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d90e04a2-5b8a-4fd5-9db8-27c02f033313",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../assets/important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h1 style=\"color:#900;\">Important Note</h1>\n",
|
||||
" <span style=\"color:#900;\">\n",
|
||||
" In this lab, I use high end models GPT 5, Claude 4.5 Sonnet, Gemini 2.5 Pro, Grok 4, which are the slightly higher priced models. The costs are still low, but if you'd prefer to keep costs ultra low, please pick lower cost models like gpt-5-nano.\n",
|
||||
" </span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import io\n",
|
||||
"import sys\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import gradio as gr\n",
|
||||
"import subprocess\n",
|
||||
"from IPython.display import Markdown, display\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4f672e1c-87e9-4865-b760-370fa605e614",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"load_dotenv(override=True)\n",
|
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
|
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
|
||||
"grok_api_key = os.getenv('GROK_API_KEY')\n",
|
||||
"groq_api_key = os.getenv('GROQ_API_KEY')\n",
|
||||
"openrouter_api_key = os.getenv('OPENROUTER_API_KEY')\n",
|
||||
"\n",
|
||||
"if openai_api_key:\n",
|
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"OpenAI API Key not set\")\n",
|
||||
" \n",
|
||||
"if anthropic_api_key:\n",
|
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Anthropic API Key not set (and this is optional)\")\n",
|
||||
"\n",
|
||||
"if google_api_key:\n",
|
||||
" print(f\"Google API Key exists and begins {google_api_key[:2]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Google API Key not set (and this is optional)\")\n",
|
||||
"\n",
|
||||
"if grok_api_key:\n",
|
||||
" print(f\"Grok API Key exists and begins {grok_api_key[:4]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Grok API Key not set (and this is optional)\")\n",
|
||||
"\n",
|
||||
"if groq_api_key:\n",
|
||||
" print(f\"Groq API Key exists and begins {groq_api_key[:4]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Groq API Key not set (and this is optional)\")\n",
|
||||
"\n",
|
||||
"if openrouter_api_key:\n",
|
||||
" print(f\"OpenRouter API Key exists and begins {openrouter_api_key[:6]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"OpenRouter API Key not set (and this is optional)\")\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59863df1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Connect to client libraries\n",
|
||||
"\n",
|
||||
"openai = OpenAI()\n",
|
||||
"\n",
|
||||
"anthropic_url = \"https://api.anthropic.com/v1/\"\n",
|
||||
"gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
|
||||
"grok_url = \"https://api.x.ai/v1\"\n",
|
||||
"groq_url = \"https://api.groq.com/openai/v1\"\n",
|
||||
"ollama_url = \"http://localhost:11434/v1\"\n",
|
||||
"openrouter_url = \"https://openrouter.ai/api/v1\"\n",
|
||||
"\n",
|
||||
"anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)\n",
|
||||
"gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)\n",
|
||||
"grok = OpenAI(api_key=grok_api_key, base_url=grok_url)\n",
|
||||
"groq = OpenAI(api_key=groq_api_key, base_url=groq_url)\n",
|
||||
"ollama = OpenAI(api_key=\"ollama\", base_url=ollama_url)\n",
|
||||
"openrouter = OpenAI(api_key=openrouter_api_key, base_url=openrouter_url)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"models = [\"gpt-5\", \"claude-sonnet-4-5-20250929\", \"grok-4\", \"gemini-2.5-pro\", \"qwen2.5-coder\", \"deepseek-coder-v2\", \"gpt-oss:20b\", \"qwen/qwen3-coder-30b-a3b-instruct\", \"openai/gpt-oss-120b\", ]\n",
|
||||
"\n",
|
||||
"clients = {\"gpt-5\": openai, \"claude-sonnet-4-5-20250929\": anthropic, \"grok-4\": grok, \"gemini-2.5-pro\": gemini, \"openai/gpt-oss-120b\": groq, \"qwen2.5-coder\": ollama, \"deepseek-coder-v2\": ollama, \"gpt-oss:20b\": ollama, \"qwen/qwen3-coder-30b-a3b-instruct\": openrouter}\n",
|
||||
"\n",
|
||||
"# Want to keep costs ultra-low? Replace this with models of your choice, using the examples from yesterday"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "68c1f1be",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from system_info import retrieve_system_info, rust_toolchain_info\n",
|
||||
"\n",
|
||||
"system_info = retrieve_system_info()\n",
|
||||
"rust_info = rust_toolchain_info()\n",
|
||||
"rust_info"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b8bd44f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"message = f\"\"\"\n",
|
||||
"Here is a report of the system information for my computer.\n",
|
||||
"I want to run a Rust compiler to compile a single rust file called main.rs and then execute it in the simplest way possible.\n",
|
||||
"Please reply with whether I need to install a Rust toolchain to do this. If so, please provide the simplest step by step instructions to do so.\n",
|
||||
"\n",
|
||||
"If I'm already set up to compile Rust code, then I'd like to run something like this in Python to compile and execute the code:\n",
|
||||
"```python\n",
|
||||
"compile_command = # something here - to achieve the fastest possible runtime performance\n",
|
||||
"compile_result = subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
|
||||
"run_command = # something here\n",
|
||||
"run_result = subprocess.run(run_command, check=True, text=True, capture_output=True)\n",
|
||||
"return run_result.stdout\n",
|
||||
"```\n",
|
||||
"Please tell me exactly what I should use for the compile_command and run_command.\n",
|
||||
"Have the maximum possible runtime performance in mind; compile time can be slow. Fastest possible runtime performance for this platform is key.\n",
|
||||
"Reply with the commands in markdown.\n",
|
||||
"\n",
|
||||
"System information:\n",
|
||||
"{system_info}\n",
|
||||
"\n",
|
||||
"Rust toolchain information:\n",
|
||||
"{rust_info}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"response = openai.chat.completions.create(model=models[0], messages=[{\"role\": \"user\", \"content\": message}])\n",
|
||||
"display(Markdown(response.choices[0].message.content))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "81e92c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## For C++, overwrite this with the commands from yesterday, or for Rust, use the new commands\n",
|
||||
"\n",
|
||||
"Or just use the website like yesterday:\n",
|
||||
"\n",
|
||||
" https://www.programiz.com/cpp-programming/online-compiler/"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d734a634",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"compile_command = [\n",
|
||||
" \"/Users/ed/.cargo/bin/rustc\",\n",
|
||||
" \"main.rs\",\n",
|
||||
" \"-C\", \"opt-level=3\",\n",
|
||||
" \"-C\", \"target-cpu=native\",\n",
|
||||
" \"-C\", \"codegen-units=1\",\n",
|
||||
" \"-C\", \"lto=fat\",\n",
|
||||
" \"-C\", \"panic=abort\",\n",
|
||||
" \"-C\", \"strip=symbols\",\n",
|
||||
" \"-o\", \"main\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"run_command = [\"./main\"]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0b0a437",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## And now, on with the main task"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6896636f-923e-4a2c-9d6c-fac07828a201",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"language = \"Rust\" # or \"C++\"\n",
|
||||
"extension = \"rs\" if language == \"Rust\" else \"cpp\"\n",
|
||||
"\n",
|
||||
"system_prompt = f\"\"\"\n",
|
||||
"Your task is to convert Python code into high performance {language} code.\n",
|
||||
"Respond only with {language} code. Do not provide any explanation other than occasional comments.\n",
|
||||
"The {language} response needs to produce an identical output in the fastest possible time.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"def user_prompt_for(python):\n",
|
||||
" return f\"\"\"\n",
|
||||
"Port this Python code to {language} with the fastest possible implementation that produces identical output in the least time.\n",
|
||||
"The system information is:\n",
|
||||
"{system_info}\n",
|
||||
"Your response will be written to a file called main.{language} and then compiled and executed; the compilation command is:\n",
|
||||
"{compile_command}\n",
|
||||
"Respond only with {language} code.\n",
|
||||
"Python code to port:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"{python}\n",
|
||||
"```\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def messages_for(python):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
|
||||
" ]\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def write_output(code):\n",
|
||||
" with open(f\"main.{extension}\", \"w\") as f:\n",
|
||||
" f.write(code)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def port(model, python):\n",
|
||||
" client = clients[model]\n",
|
||||
" reasoning_effort = \"high\" if 'gpt' in model else None\n",
|
||||
" response = client.chat.completions.create(model=model, messages=messages_for(python), reasoning_effort=reasoning_effort)\n",
|
||||
" reply = response.choices[0].message.content\n",
|
||||
" reply = reply.replace('```cpp','').replace('```rust','').replace('```','')\n",
|
||||
" return reply"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def run_python(code):\n",
|
||||
" globals_dict = {\"__builtins__\": __builtins__}\n",
|
||||
"\n",
|
||||
" buffer = io.StringIO()\n",
|
||||
" old_stdout = sys.stdout\n",
|
||||
" sys.stdout = buffer\n",
|
||||
"\n",
|
||||
" try:\n",
|
||||
" exec(code, globals_dict)\n",
|
||||
" output = buffer.getvalue()\n",
|
||||
" except Exception as e:\n",
|
||||
" output = f\"Error: {e}\"\n",
|
||||
" finally:\n",
|
||||
" sys.stdout = old_stdout\n",
|
||||
"\n",
|
||||
" return output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use the commands from GPT 5\n",
|
||||
"\n",
|
||||
"def compile_and_run(code):\n",
|
||||
" write_output(code)\n",
|
||||
" try:\n",
|
||||
" subprocess.run(compile_command, check=True, text=True, capture_output=True)\n",
|
||||
" run_result = subprocess.run(run_command, check=True, text=True, capture_output=True)\n",
|
||||
" return run_result.stdout\n",
|
||||
" except subprocess.CalledProcessError as e:\n",
|
||||
" return f\"An error occurred:\\n{e.stderr}\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"python_hard = \"\"\"# Be careful to support large numbers\n",
|
||||
"\n",
|
||||
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
|
||||
" value = seed\n",
|
||||
" while True:\n",
|
||||
" value = (a * value + c) % m\n",
|
||||
" yield value\n",
|
||||
" \n",
|
||||
"def max_subarray_sum(n, seed, min_val, max_val):\n",
|
||||
" lcg_gen = lcg(seed)\n",
|
||||
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
|
||||
" max_sum = float('-inf')\n",
|
||||
" for i in range(n):\n",
|
||||
" current_sum = 0\n",
|
||||
" for j in range(i, n):\n",
|
||||
" current_sum += random_numbers[j]\n",
|
||||
" if current_sum > max_sum:\n",
|
||||
" max_sum = current_sum\n",
|
||||
" return max_sum\n",
|
||||
"\n",
|
||||
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
|
||||
" total_sum = 0\n",
|
||||
" lcg_gen = lcg(initial_seed)\n",
|
||||
" for _ in range(20):\n",
|
||||
" seed = next(lcg_gen)\n",
|
||||
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
|
||||
" return total_sum\n",
|
||||
"\n",
|
||||
"# Parameters\n",
|
||||
"n = 10000 # Number of random numbers\n",
|
||||
"initial_seed = 42 # Initial seed for the LCG\n",
|
||||
"min_val = -10 # Minimum value of random numbers\n",
|
||||
"max_val = 10 # Maximum value of random numbers\n",
|
||||
"\n",
|
||||
"# Timing the function\n",
|
||||
"import time\n",
|
||||
"start_time = time.time()\n",
|
||||
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
|
||||
"end_time = time.time()\n",
|
||||
"\n",
|
||||
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
|
||||
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "465d6cad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from styles import CSS\n",
|
||||
"\n",
|
||||
"with gr.Blocks(css=CSS, theme=gr.themes.Monochrome(), title=f\"Port from Python to {language}\") as ui:\n",
|
||||
" with gr.Row(equal_height=True):\n",
|
||||
" with gr.Column(scale=6):\n",
|
||||
" python = gr.Code(\n",
|
||||
" label=\"Python (original)\",\n",
|
||||
" value=python_hard,\n",
|
||||
" language=\"python\",\n",
|
||||
" lines=26\n",
|
||||
" )\n",
|
||||
" with gr.Column(scale=6):\n",
|
||||
" cpp = gr.Code(\n",
|
||||
" label=f\"{language} (generated)\",\n",
|
||||
" value=\"\",\n",
|
||||
" language=\"cpp\",\n",
|
||||
" lines=26\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" with gr.Row(elem_classes=[\"controls\"]):\n",
|
||||
" python_run = gr.Button(\"Run Python\", elem_classes=[\"run-btn\", \"py\"])\n",
|
||||
" model = gr.Dropdown(models, value=models[0], show_label=False)\n",
|
||||
" convert = gr.Button(f\"Port to {language}\", elem_classes=[\"convert-btn\"])\n",
|
||||
" cpp_run = gr.Button(f\"Run {language}\", elem_classes=[\"run-btn\", \"cpp\"])\n",
|
||||
"\n",
|
||||
" with gr.Row(equal_height=True):\n",
|
||||
" with gr.Column(scale=6):\n",
|
||||
" python_out = gr.TextArea(label=\"Python result\", lines=8, elem_classes=[\"py-out\"])\n",
|
||||
" with gr.Column(scale=6):\n",
|
||||
" cpp_out = gr.TextArea(label=f\"{language} result\", lines=8, elem_classes=[\"cpp-out\"])\n",
|
||||
"\n",
|
||||
" convert.click(fn=port, inputs=[model, python], outputs=[cpp])\n",
|
||||
" python_run.click(fn=run_python, inputs=[python], outputs=[python_out])\n",
|
||||
" cpp_run.click(fn=compile_and_run, inputs=[cpp], outputs=[cpp_out])\n",
|
||||
"\n",
|
||||
"ui.launch(inbrowser=True)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2311ada8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RESULTS!\n",
|
||||
"\n",
|
||||
"Qwen 2.5 Coder: FAIL \n",
|
||||
"Gemini 2.5 Pro: FAIL \n",
|
||||
"DeepSeek Coder v2: FAIL \n",
|
||||
"Qwen3 Coder 30B: FAIL \n",
|
||||
"Claude Sonnet 4.5: FAIL \n",
|
||||
"GPT-5: FAIL \n",
|
||||
"\n",
|
||||
"3rd place: GPT-oss-20B: 0.000341 \n",
|
||||
"2nd place: Grok 4: 0.000317 \n",
|
||||
"**1st place: OpenAI GPT-OSS 120B: 0.000304** "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b9b51dc7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(f\"In Ed's experimenet, the GPT-OSS 120B model outcome is {33.755209/0.000304:,.0f} times faster than the Python code.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6197bb97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <chrono>
|
||||
#include <limits>
|
||||
#include <iomanip>
|
||||
|
||||
class LCG {
|
||||
private:
|
||||
uint64_t value;
|
||||
const uint64_t a = 1664525;
|
||||
const uint64_t c = 1013904223;
|
||||
const uint64_t m = 1ULL << 32;
|
||||
|
||||
public:
|
||||
LCG(uint64_t seed) : value(seed) {}
|
||||
|
||||
uint64_t next() {
|
||||
value = (a * value + c) % m;
|
||||
return value;
|
||||
}
|
||||
};
|
||||
|
||||
int64_t max_subarray_sum(int n, uint64_t seed, int min_val, int max_val) {
|
||||
LCG lcg(seed);
|
||||
std::vector<int> random_numbers(n);
|
||||
for (int i = 0; i < n; ++i) {
|
||||
random_numbers[i] = static_cast<int>(lcg.next() % (max_val - min_val + 1) + min_val);
|
||||
}
|
||||
|
||||
int64_t max_sum = std::numeric_limits<int64_t>::min();
|
||||
int64_t current_sum = 0;
|
||||
for (int i = 0; i < n; ++i) {
|
||||
current_sum = std::max(static_cast<int64_t>(random_numbers[i]), current_sum + random_numbers[i]);
|
||||
max_sum = std::max(max_sum, current_sum);
|
||||
}
|
||||
return max_sum;
|
||||
}
|
||||
|
||||
int64_t total_max_subarray_sum(int n, uint64_t initial_seed, int min_val, int max_val) {
|
||||
int64_t total_sum = 0;
|
||||
LCG lcg(initial_seed);
|
||||
for (int i = 0; i < 20; ++i) {
|
||||
uint64_t seed = lcg.next();
|
||||
total_sum += max_subarray_sum(n, seed, min_val, max_val);
|
||||
}
|
||||
return total_sum;
|
||||
}
|
||||
|
||||
int main() {
|
||||
int n = 10000;
|
||||
uint64_t initial_seed = 42;
|
||||
int min_val = -10;
|
||||
int max_val = 10;
|
||||
|
||||
auto start_time = std::chrono::high_resolution_clock::now();
|
||||
int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val);
|
||||
auto end_time = std::chrono::high_resolution_clock::now();
|
||||
|
||||
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
|
||||
|
||||
std::cout << "Total Maximum Subarray Sum (20 runs): " << result << std::endl;
|
||||
std::cout << "Execution Time: " << std::fixed << std::setprecision(6) << duration.count() / 1e6 << " seconds" << std::endl;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
|
||||
#include <iostream>
|
||||
|
||||
int main() {
|
||||
std::cout << "Hello";
|
||||
return 0;
|
||||
}
|
||||
60
week4/styles.py
Normal file
60
week4/styles.py
Normal file
@@ -0,0 +1,60 @@
|
||||
CSS = """
|
||||
:root {
|
||||
--py-color: #209dd7;
|
||||
--cpp-color: #ecad0a;
|
||||
--accent: #753991;
|
||||
--card: #161a22;
|
||||
--text: #e9eef5;
|
||||
}
|
||||
|
||||
/* Full-width layout */
|
||||
.gradio-container {
|
||||
max-width: 100% !important;
|
||||
padding: 0 40px !important;
|
||||
}
|
||||
|
||||
/* Code card styling */
|
||||
.card {
|
||||
background: var(--card);
|
||||
border: 1px solid rgba(255,255,255,.08);
|
||||
border-radius: 14px;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
/* Buttons */
|
||||
.convert-btn button {
|
||||
background: var(--accent) !important;
|
||||
border-color: rgba(255,255,255,.12) !important;
|
||||
color: white !important;
|
||||
font-weight: 700;
|
||||
}
|
||||
.run-btn button {
|
||||
background: #202631 !important;
|
||||
color: var(--text) !important;
|
||||
border-color: rgba(255,255,255,.12) !important;
|
||||
}
|
||||
.run-btn.py button:hover { box-shadow: 0 0 0 2px var(--py-color) inset; }
|
||||
.run-btn.cpp button:hover { box-shadow: 0 0 0 2px var(--cpp-color) inset; }
|
||||
.convert-btn button:hover { box-shadow: 0 0 0 2px var(--accent) inset; }
|
||||
|
||||
/* Outputs with color tint */
|
||||
.py-out textarea {
|
||||
background: linear-gradient(180deg, rgba(32,157,215,.18), rgba(32,157,215,.10));
|
||||
border: 1px solid rgba(32,157,215,.35) !important;
|
||||
color: rgba(32,157,215,1) !important;
|
||||
font-weight: 600;
|
||||
}
|
||||
.cpp-out textarea {
|
||||
background: linear-gradient(180deg, rgba(236,173,10,.22), rgba(236,173,10,.12));
|
||||
border: 1px solid rgba(236,173,10,.45) !important;
|
||||
color: rgba(236,173,10,1) !important;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
/* Align controls neatly */
|
||||
.controls .wrap {
|
||||
gap: 10px;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
}
|
||||
"""
|
||||
359
week4/system_info.py
Normal file
359
week4/system_info.py
Normal file
@@ -0,0 +1,359 @@
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
# ------------------------- helpers -------------------------
|
||||
|
||||
|
||||
def _run(cmd, timeout=3):
|
||||
"""Run a command safely. Returns stdout text or ''.
|
||||
Accepts either a string (shell) or list (no shell)."""
|
||||
try:
|
||||
if isinstance(cmd, str):
|
||||
return subprocess.check_output(
|
||||
cmd, shell=True, text=True, stderr=subprocess.DEVNULL, timeout=timeout
|
||||
).strip()
|
||||
else:
|
||||
return subprocess.check_output(
|
||||
cmd, shell=False, text=True, stderr=subprocess.DEVNULL, timeout=timeout
|
||||
).strip()
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
|
||||
def _first_line(s: str) -> str:
|
||||
s = (s or "").strip()
|
||||
return s.splitlines()[0].strip() if s else ""
|
||||
|
||||
|
||||
def _which(name: str) -> str:
|
||||
return shutil.which(name) or ""
|
||||
|
||||
|
||||
def _bool_from_output(s: str) -> bool:
|
||||
return s.strip() in {"1", "true", "True", "YES", "Yes", "yes"}
|
||||
|
||||
|
||||
# ------------------------- OS & env -------------------------
|
||||
|
||||
|
||||
def _os_block():
|
||||
sysname = platform.system() # 'Windows', 'Darwin', 'Linux'
|
||||
machine = platform.machine() or ""
|
||||
release = platform.release() or ""
|
||||
version = platform.version() or ""
|
||||
kernel = release if sysname == "Windows" else (_run(["uname", "-r"]) or release)
|
||||
|
||||
distro = {"name": "", "version": ""}
|
||||
if sysname == "Linux":
|
||||
# Best-effort parse of /etc/os-release
|
||||
try:
|
||||
with open("/etc/os-release", "r") as f:
|
||||
data = {}
|
||||
for line in f:
|
||||
if "=" in line:
|
||||
k, v = line.rstrip().split("=", 1)
|
||||
data[k] = v.strip('"')
|
||||
distro["name"] = data.get("PRETTY_NAME") or data.get("NAME", "")
|
||||
distro["version"] = data.get("VERSION_ID") or data.get("VERSION", "")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# WSL / Rosetta detection (harmless if not present)
|
||||
wsl = False
|
||||
if sysname != "Windows":
|
||||
try:
|
||||
with open("/proc/version", "r") as f:
|
||||
v = f.read().lower()
|
||||
wsl = ("microsoft" in v) or ("wsl" in v)
|
||||
except Exception:
|
||||
wsl = False
|
||||
|
||||
rosetta = False
|
||||
if sysname == "Darwin":
|
||||
rosetta = _bool_from_output(_run(["sysctl", "-in", "sysctl.proc_translated"]))
|
||||
|
||||
# Target triple (best effort)
|
||||
target = ""
|
||||
for cc in ("clang", "gcc"):
|
||||
if _which(cc):
|
||||
out = _run([cc, "-dumpmachine"])
|
||||
if out:
|
||||
target = _first_line(out)
|
||||
break
|
||||
|
||||
return {
|
||||
"system": sysname,
|
||||
"arch": machine,
|
||||
"release": release,
|
||||
"version": version,
|
||||
"kernel": kernel,
|
||||
"distro": distro if sysname == "Linux" else None,
|
||||
"wsl": wsl,
|
||||
"rosetta2_translated": rosetta,
|
||||
"target_triple": target,
|
||||
}
|
||||
|
||||
|
||||
# ------------------------- package managers -------------------------
|
||||
|
||||
|
||||
def _package_managers():
|
||||
sysname = platform.system()
|
||||
pms = []
|
||||
if sysname == "Windows":
|
||||
for pm in ("winget", "choco", "scoop"):
|
||||
if _which(pm):
|
||||
pms.append(pm)
|
||||
elif sysname == "Darwin":
|
||||
if _run(["xcode-select", "-p"]):
|
||||
pms.append("xcode-select (CLT)")
|
||||
for pm in ("brew", "port"):
|
||||
if _which(pm):
|
||||
pms.append(pm)
|
||||
else:
|
||||
for pm in ("apt", "dnf", "yum", "pacman", "zypper", "apk", "emerge"):
|
||||
if _which(pm):
|
||||
pms.append(pm)
|
||||
return pms
|
||||
|
||||
|
||||
# ------------------------- CPU (minimal) -------------------------
|
||||
|
||||
|
||||
def _cpu_block():
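    # Collect CPU brand, core counts and SIMD hints - enough for an LLM to suggest sensible compiler flags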
|
||||
sysname = platform.system()
|
||||
brand = ""
|
||||
# A simple brand/model read per OS; ignore failures
|
||||
if sysname == "Linux":
|
||||
brand = _run("grep -m1 'model name' /proc/cpuinfo | cut -d: -f2").strip()
|
||||
elif sysname == "Darwin":
|
||||
brand = _run(["sysctl", "-n", "machdep.cpu.brand_string"])
|
||||
elif sysname == "Windows":
|
||||
brand = _run('powershell -NoProfile -Command "(Get-CimInstance Win32_Processor).Name"')
|
||||
if not brand:
|
||||
brand = _run("wmic cpu get Name /value").replace("Name=", "").strip()
|
||||
|
||||
# Logical cores always available; physical is best-effort
|
||||
cores_logical = os.cpu_count() or 0
|
||||
cores_physical = 0
|
||||
if sysname == "Darwin":
|
||||
cores_physical = int(_run(["sysctl", "-n", "hw.physicalcpu"]) or "0")
|
||||
elif sysname == "Windows":
|
||||
cores_physical = int(
|
||||
_run('powershell -NoProfile -Command "(Get-CimInstance Win32_Processor).NumberOfCores"')
|
||||
or "0"
|
||||
)
|
||||
elif sysname == "Linux":
|
||||
# This is a quick approximation; fine for our use (parallel -j suggestions)
|
||||
try:
|
||||
# Count unique "core id" per physical id
|
||||
mapping = _run("LC_ALL=C lscpu -p=CORE,SOCKET | grep -v '^#'").splitlines()
|
||||
unique = set(tuple(line.split(",")) for line in mapping if "," in line)
|
||||
cores_physical = len(unique) or 0
|
||||
except Exception:
|
||||
cores_physical = 0
|
||||
|
||||
# A tiny SIMD hint set (best-effort, optional)
|
||||
simd = []
|
||||
if sysname == "Linux":
|
||||
flags = _run("grep -m1 'flags' /proc/cpuinfo | cut -d: -f2")
|
||||
if flags:
|
||||
fset = set(flags.upper().split())
|
||||
for x in ("AVX512F", "AVX2", "AVX", "FMA", "SSE4_2", "NEON", "SVE"):
|
||||
if x in fset:
|
||||
simd.append(x)
|
||||
elif sysname == "Darwin":
|
||||
feats = (
|
||||
(
|
||||
_run(["sysctl", "-n", "machdep.cpu.features"])
|
||||
+ " "
|
||||
+ _run(["sysctl", "-n", "machdep.cpu.leaf7_features"])
|
||||
)
|
||||
.upper()
|
||||
.split()
|
||||
)
|
||||
for x in ("AVX512F", "AVX2", "AVX", "FMA", "SSE4_2", "NEON", "SVE"):
|
||||
if x in feats:
|
||||
simd.append(x)
|
||||
# On Windows, skip flags — brand typically suffices for MSVC /arch choice.
|
||||
|
||||
return {
|
||||
"brand": brand.strip(),
|
||||
"cores_logical": cores_logical,
|
||||
"cores_physical": cores_physical,
|
||||
"simd": sorted(set(simd)),
|
||||
}
|
||||
|
||||
|
||||
# ------------------------- toolchain presence -------------------------
|
||||
|
||||
|
||||
def _toolchain_block():
|
||||
def ver_line(exe, args=("--version",)):
|
||||
p = _which(exe)
|
||||
if not p:
|
||||
return ""
|
||||
out = _run([p, *args])
|
||||
return _first_line(out)
|
||||
|
||||
gcc = ver_line("gcc")
|
||||
gpp = ver_line("g++")
|
||||
clang = ver_line("clang")
|
||||
|
||||
# MSVC cl (only available inside proper dev shell; handle gracefully)
|
||||
msvc_cl = ""
|
||||
cl_path = _which("cl")
|
||||
if cl_path:
|
||||
msvc_cl = _first_line(_run("cl 2>&1"))
|
||||
|
||||
# Build tools (presence + short version line)
|
||||
cmake = ver_line("cmake")
|
||||
ninja = _first_line(_run([_which("ninja"), "--version"])) if _which("ninja") else ""
|
||||
make = ver_line("make")
|
||||
|
||||
# Linker (we only care if lld is available)
|
||||
lld = ver_line("ld.lld")
|
||||
return {
|
||||
"compilers": {"gcc": gcc, "g++": gpp, "clang": clang, "msvc_cl": msvc_cl},
|
||||
"build_tools": {"cmake": cmake, "ninja": ninja, "make": make},
|
||||
"linkers": {"ld_lld": lld},
|
||||
}
|
||||
|
||||
|
||||
# ------------------------- public API -------------------------
|
||||
|
||||
|
||||
def retrieve_system_info():
|
||||
"""
|
||||
Returns a compact dict with enough info for an LLM to:
|
||||
- Pick an install path (winget/choco/scoop, Homebrew/Xcode CLT, apt/dnf/...),
|
||||
- Choose a compiler family (MSVC/clang/gcc),
|
||||
- Suggest safe optimization flags (e.g., -O3/-march=native or MSVC /O2),
|
||||
- Decide on a build system (cmake+ninja) and parallel -j value.
|
||||
"""
|
||||
return {
|
||||
"os": _os_block(),
|
||||
"package_managers": _package_managers(),
|
||||
"cpu": _cpu_block(),
|
||||
"toolchain": _toolchain_block(),
|
||||
}
|
||||
|
||||
|
||||
def rust_toolchain_info():
|
||||
"""
|
||||
Return a dict with Rust-related settings:
|
||||
- presence and paths for rustc / cargo / rustup / rust-analyzer
|
||||
- versions
|
||||
- active/default toolchain (if rustup is present)
|
||||
- installed targets
|
||||
- common env vars (CARGO_HOME, RUSTUP_HOME, RUSTFLAGS, CARGO_BUILD_TARGET)
|
||||
- simple execution examples
|
||||
Works on Windows, macOS, and Linux. Uses the existing helpers: _run, _which, _first_line.
|
||||
"""
|
||||
info = {
|
||||
"installed": False,
|
||||
"rustc": {"path": "", "version": "", "host_triple": "", "release": "", "commit_hash": ""},
|
||||
"cargo": {"path": "", "version": ""},
|
||||
"rustup": {
|
||||
"path": "",
|
||||
"version": "",
|
||||
"active_toolchain": "",
|
||||
"default_toolchain": "",
|
||||
"toolchains": [],
|
||||
"targets_installed": [],
|
||||
},
|
||||
"rust_analyzer": {"path": ""},
|
||||
"env": {
|
||||
"CARGO_HOME": os.environ.get("CARGO_HOME", ""),
|
||||
"RUSTUP_HOME": os.environ.get("RUSTUP_HOME", ""),
|
||||
"RUSTFLAGS": os.environ.get("RUSTFLAGS", ""),
|
||||
"CARGO_BUILD_TARGET": os.environ.get("CARGO_BUILD_TARGET", ""),
|
||||
},
|
||||
"execution_examples": [],
|
||||
}
|
||||
|
||||
# Paths
|
||||
rustc_path = _which("rustc")
|
||||
cargo_path = _which("cargo")
|
||||
rustup_path = _which("rustup")
|
||||
ra_path = _which("rust-analyzer")
|
||||
|
||||
info["rustc"]["path"] = rustc_path or ""
|
||||
info["cargo"]["path"] = cargo_path or ""
|
||||
info["rustup"]["path"] = rustup_path or ""
|
||||
info["rust_analyzer"]["path"] = ra_path or ""
|
||||
|
||||
# Versions & verbose details
|
||||
if rustc_path:
|
||||
ver_line = _first_line(_run([rustc_path, "--version"]))
|
||||
info["rustc"]["version"] = ver_line
|
||||
verbose = _run([rustc_path, "--version", "--verbose"])
|
||||
host = release = commit = ""
|
||||
for line in verbose.splitlines():
|
||||
if line.startswith("host:"):
|
||||
host = line.split(":", 1)[1].strip()
|
||||
elif line.startswith("release:"):
|
||||
release = line.split(":", 1)[1].strip()
|
||||
elif line.startswith("commit-hash:"):
|
||||
commit = line.split(":", 1)[1].strip()
|
||||
info["rustc"]["host_triple"] = host
|
||||
info["rustc"]["release"] = release
|
||||
info["rustc"]["commit_hash"] = commit
|
||||
|
||||
if cargo_path:
|
||||
info["cargo"]["version"] = _first_line(_run([cargo_path, "--version"]))
|
||||
|
||||
if rustup_path:
|
||||
info["rustup"]["version"] = _first_line(_run([rustup_path, "--version"]))
|
||||
# Active toolchain
|
||||
active = _first_line(_run([rustup_path, "show", "active-toolchain"]))
|
||||
info["rustup"]["active_toolchain"] = active
|
||||
|
||||
# Default toolchain (best effort)
|
||||
# Try parsing `rustup toolchain list` and pick the line with "(default)"
|
||||
tlist = _run([rustup_path, "toolchain", "list"]).splitlines()
|
||||
info["rustup"]["toolchains"] = [t.strip() for t in tlist if t.strip()]
|
||||
default_tc = ""
|
||||
for line in tlist:
|
||||
if "(default)" in line:
|
||||
default_tc = line.strip()
|
||||
break
|
||||
if not default_tc:
|
||||
# Fallback: sometimes `rustup show` includes "default toolchain: ..."
|
||||
for line in _run([rustup_path, "show"]).splitlines():
|
||||
if "default toolchain:" in line:
|
||||
default_tc = line.split(":", 1)[1].strip()
|
||||
break
|
||||
info["rustup"]["default_toolchain"] = default_tc
|
||||
|
||||
# Installed targets
|
||||
targets = _run([rustup_path, "target", "list", "--installed"]).split()
|
||||
info["rustup"]["targets_installed"] = targets
|
||||
|
||||
# Execution examples (only include what will work on this system)
|
||||
exec_examples = []
|
||||
if cargo_path:
|
||||
exec_examples.append(f'"{cargo_path}" build')
|
||||
exec_examples.append(f'"{cargo_path}" run')
|
||||
exec_examples.append(f'"{cargo_path}" test')
|
||||
if rustc_path:
|
||||
exec_examples.append(f'"{rustc_path}" hello.rs -o hello')
|
||||
info["execution_examples"] = exec_examples
|
||||
|
||||
# Installed?
|
||||
info["installed"] = bool(rustc_path or cargo_path or rustup_path)
|
||||
|
||||
# Fill in default homes if env vars are empty but typical locations exist
|
||||
def _maybe_default_home(env_val, default_basename):
|
||||
if env_val:
|
||||
return env_val
|
||||
home = os.path.expanduser("~") or ""
|
||||
candidate = os.path.join(home, default_basename) if home else ""
|
||||
return candidate if candidate and os.path.isdir(candidate) else ""
|
||||
|
||||
info["env"]["CARGO_HOME"] = _maybe_default_home(info["env"]["CARGO_HOME"], ".cargo")
|
||||
info["env"]["RUSTUP_HOME"] = _maybe_default_home(info["env"]["RUSTUP_HOME"], ".rustup")
|
||||
|
||||
return info
|
||||