Added test case automation solution
BIN
week1/community-contributions/reqdoc.docx
Normal file
Binary file not shown.
308
week1/community-contributions/testcase_automation.ipynb
Normal file
@@ -0,0 +1,308 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "it1JLoxrSqO1",
   "metadata": {
    "id": "it1JLoxrSqO1"
   },
   "outputs": [],
   "source": [
    "!pip install openai python-docx python-dotenv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "950a084a-7f92-4669-af62-f07cb121da56",
   "metadata": {
    "id": "950a084a-7f92-4669-af62-f07cb121da56"
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "from dotenv import load_dotenv\n",
    "from IPython.display import Markdown, display, update_display\n",
    "from openai import OpenAI\n",
    "from docx import Document"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d",
   "metadata": {
    "id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d"
   },
   "outputs": [],
   "source": [
    "class ReqDoc:\n",
    "    def __init__(self, file_path):\n",
    "        self.file_path = file_path\n",
    "\n",
    "    def extract(self):\n",
    "        \"\"\"\n",
    "        Reads the content of a .docx file and returns the paragraph text as a single string.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            # Check if the file exists\n",
    "            if not os.path.exists(self.file_path):\n",
    "                raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n",
    "\n",
    "            # Attempt to open and read the document\n",
    "            doc = Document(self.file_path)\n",
    "            text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n",
    "            return text\n",
    "\n",
    "        except FileNotFoundError as fnf_error:\n",
    "            print(fnf_error)\n",
    "            return None\n",
    "        except Exception as e:\n",
    "            print(f\"An error occurred: {e}\")\n",
    "            return None\n"
   ]
  },
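  {
   "cell_type": "code",
   "execution_count": null,
   "id": "optional-table-extract-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional variant (an illustrative sketch): requirements are sometimes kept in Word tables,\n",
    "# which doc.paragraphs does not capture. If your reqdoc.docx uses tables, a helper like the\n",
    "# hypothetical extract_with_tables below could pull that text in as well via python-docx's doc.tables.\n",
    "\n",
    "from docx import Document\n",
    "\n",
    "def extract_with_tables(file_path):\n",
    "    doc = Document(file_path)\n",
    "    parts = [paragraph.text for paragraph in doc.paragraphs]\n",
    "    for table in doc.tables:\n",
    "        for row in table.rows:\n",
    "            # Join the cells of each row so tabular requirements stay on one line\n",
    "            parts.append(\" | \".join(cell.text for cell in row.cells))\n",
    "    return \"\\n\".join(parts)\n",
    "\n",
    "# Example: print(extract_with_tables(\"reqdoc.docx\"))"
   ]
  },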
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "008f485a-5718-48f6-b408-06eb6d59d7f9",
   "metadata": {
    "id": "008f485a-5718-48f6-b408-06eb6d59d7f9"
   },
   "outputs": [],
   "source": [
    "# Initialize and constants\n",
    "load_dotenv(override=True)\n",
    "api_key = os.getenv('OPENAI_API_KEY')\n",
    "\n",
    "if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n",
    "    print(\"API key looks good!\")\n",
    "else:\n",
    "    print(\"There might be a problem with your API key. Please check!\")\n",
    "\n",
    "MODEL = 'gpt-4o-mini'\n",
    "openai = OpenAI()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6110ff3-74bc-430a-8051-7d86a216f0fb",
   "metadata": {
    "id": "b6110ff3-74bc-430a-8051-7d86a216f0fb"
   },
   "outputs": [],
   "source": [
    "# Set up the system prompt for extracting just the requirements from the document\n",
    "\n",
    "req_doc_system_prompt = \"You are provided with a complete requirements specification document. \\\n",
    "You are able to decide which content from that document relates to actual requirements, identify each requirement as \\\n",
    "functional or non-functional and list them all.\\n\"\n",
    "req_doc_system_prompt += \"If the document is empty, does not contain requirements, or you cannot extract them, please respond as such. \\\n",
    "Do not make up your own requirements.\\n\"\n",
    "req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n",
    "req_doc_system_prompt += \"\"\"\n",
    "{\n",
    "  \"requirements\": [\n",
    "    {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
    "    {\"RequirementNo\": \"FR-02\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
    "    {\"RequirementNo\": \"NFR-01\", \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n",
    "    {\"RequirementNo\": \"NFR-02\", \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n",
    "  ]\n",
    "}\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20460e45-c1b7-4dc4-ab07-932235c19895",
   "metadata": {
    "id": "20460e45-c1b7-4dc4-ab07-932235c19895"
   },
   "outputs": [],
   "source": [
    "# Set up the user prompt, sending in the requirements doc as input by calling the ReqDoc.extract function.\n",
    "# Key to note here is the explicit instruction to respond in JSON format.\n",
    "\n",
    "def req_doc_user_prompt(doc):\n",
    "    user_prompt = \"Here are the contents of a requirements document.\\n\"\n",
    "    user_prompt += f\"{doc.extract()} \\n\"\n",
    "    user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n",
    "paragraphs such as Approvers, table of contents and similar sections which are not really requirements. \\\n",
    "You must respond in JSON format.\\n\"\n",
    "    user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n",
    "    user_prompt = user_prompt[:25_000]  # Truncate if more than 25,000 characters\n",
    "    return user_prompt\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891",
   "metadata": {
    "id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891"
   },
   "outputs": [],
   "source": [
    "# Function to call the gpt-4o-mini model with the user and system prompts set above, returning the JSON-formatted result\n",
    "\n",
    "def get_requirements(doc):\n",
    "    reqdoc = ReqDoc(doc)\n",
    "    response = openai.chat.completions.create(\n",
    "        model=MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": req_doc_system_prompt},\n",
    "            {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n",
    "        ],\n",
    "        response_format={\"type\": \"json_object\"}\n",
    "    )\n",
    "    result = response.choices[0].message.content\n",
    "    return json.loads(result)"
   ]
  },
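  {
   "cell_type": "code",
   "execution_count": null,
   "id": "safe-json-parse-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional safeguard (an illustrative sketch): even with response_format={\"type\": \"json_object\"},\n",
    "# it can be worth guarding the json.loads call. get_requirements_safe is a hypothetical wrapper\n",
    "# that falls back to an empty requirements list if the model's reply cannot be parsed.\n",
    "\n",
    "def get_requirements_safe(doc):\n",
    "    try:\n",
    "        return get_requirements(doc)\n",
    "    except json.JSONDecodeError as e:\n",
    "        print(f\"Could not parse the model response as JSON: {e}\")\n",
    "        return {\"requirements\": []}\n",
    "\n",
    "# Example: get_requirements_safe(\"reqdoc.docx\")"
   ]
  },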
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e",
   "metadata": {
    "id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e"
   },
   "outputs": [],
   "source": [
    "# Uncomment and run this if you want to see the extracted requirements in JSON format.\n",
    "#get_requirements(\"reqdoc.docx\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1fe8618c-1dfe-4030-bad8-405731294c93",
   "metadata": {
    "id": "1fe8618c-1dfe-4030-bad8-405731294c93"
   },
   "source": [
    "### Next, we make a second call to gpt-4o-mini to generate test cases from the extracted requirements"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b",
   "metadata": {
    "id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b"
   },
   "outputs": [],
   "source": [
    "# Set up the system prompt to ask for test cases in table format\n",
    "\n",
    "system_prompt = \"You are an assistant that receives a list of functional and non-functional requirements in JSON format. You are an expert in generating unit test cases for each requirement. \\\n",
    "You will create as many different test cases as needed for each requirement and produce the result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n",
    "The table will contain the following columns:\\n\\\n",
    "1. S No\\n\\\n",
    "2. Requirement No\\n\\\n",
    "3. Requirement Description\\n\\\n",
    "4. Test Case ID\\n\\\n",
    "5. Test case summary\\n\\\n",
    "6. Test case description\\n\\\n",
    "7. Success criteria\\n\"\n",
    "system_prompt += \"If you are provided with an empty list, ask for a proper requirements doc.\\n\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5",
   "metadata": {
    "id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5"
   },
   "outputs": [],
   "source": [
    "# Set up the user prompt, passing in the req doc file. This in turn calls the get_requirements function, which makes the first call to gpt-4o-mini.\n",
    "\n",
    "def get_testcase_user_prompt(reqdoc):\n",
    "    user_prompt = \"You are looking at the following list of requirements. \\n\"\n",
    "    user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n",
    "    user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n",
    "    user_prompt = user_prompt[:25_000]  # Truncate if more than 25,000 characters\n",
    "    return user_prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0",
   "metadata": {
    "id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0"
   },
   "outputs": [],
   "source": [
    "# This is the second call to gpt-4o-mini to get test cases. display(Markdown) takes care of producing a neatly formatted table output.\n",
    "def create_testcase_doc(reqdoc):\n",
    "    stream = openai.chat.completions.create(\n",
    "        model=MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": system_prompt},\n",
    "            {\"role\": \"user\", \"content\": get_testcase_user_prompt(reqdoc)}\n",
    "        ],\n",
    "        stream=True\n",
    "    )\n",
    "    response = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "        response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
    "        update_display(Markdown(response), display_id=display_handle.display_id)"
   ]
  },
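  {
   "cell_type": "code",
   "execution_count": null,
   "id": "save-testcases-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional variant (an illustrative sketch): collect the streamed test-case table into a string\n",
    "# and also write it to a markdown file, so the generated test cases can be shared outside the\n",
    "# notebook. save_testcase_doc and the testcases.md filename are illustrative assumptions.\n",
    "\n",
    "def save_testcase_doc(reqdoc, out_path=\"testcases.md\"):\n",
    "    stream = openai.chat.completions.create(\n",
    "        model=MODEL,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": system_prompt},\n",
    "            {\"role\": \"user\", \"content\": get_testcase_user_prompt(reqdoc)}\n",
    "        ],\n",
    "        stream=True\n",
    "    )\n",
    "    response = \"\"\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "    response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
    "    with open(out_path, \"w\", encoding=\"utf-8\") as f:\n",
    "        f.write(response)\n",
    "    return response\n",
    "\n",
    "# Example: save_testcase_doc(\"reqdoc.docx\")"
   ]
  },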
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0612d662-7047-4620-aa1c-2eb1c3d715cb",
   "metadata": {
    "id": "0612d662-7047-4620-aa1c-2eb1c3d715cb"
   },
   "outputs": [],
   "source": [
    "# The final piece of code. Provide the uploaded requirements filename below.\n",
    "file_path = r\"reqdoc.docx\"\n",
    "#print(file_path)\n",
    "create_testcase_doc(file_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82ae4371-22dd-4f2a-97c9-a70e0232a0aa",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}