AI Automated Test Scenario Generation System

A test scenario is not a test. It's a description of a situation to test: user, action, conditions, expected result. Writing test scenarios from requirements is monotonous work that gets done hastily or skipped altogether when time is short. An AI generator creates test scenarios from user stories, specifications, and ER diagrams, covering positive paths, negative cases, and boundary conditions.

Generation from User Stories

from langchain_openai import ChatOpenAI
from pydantic import BaseModel
import json
import re

class TestScenario(BaseModel):
    id: str
    title: str
    type: str           # positive / negative / boundary / edge_case
    priority: str       # P1 / P2 / P3
    preconditions: list[str]
    steps: list[str]
    expected_result: str
    tags: list[str]
    related_requirement: str

class TestScenarioGenerator:
    GENERATION_PROMPT = """You are an experienced QA engineer. Create test scenarios from a requirement.

Requirement: {requirement}
Acceptance Criteria:
{acceptance_criteria}

Create test scenarios of three types:
1. **Positive** (happy path + main variations)
2. **Negative** (invalid data, forbidden operations)
3. **Boundary** (min/max values, empty data)

For each scenario:
- Title (action + condition)
- Preconditions
- Steps (concrete, not "click button" but "click 'Save' button in registration form")
- Expected result (measurable)
- Priority (P1 — critical business flow, P2 — important, P3 — secondary)

Return only a JSON array of TestScenario objects with fields: id, title, type, priority, preconditions, steps, expected_result, tags, related_requirement."""

    def __init__(self):
        self.llm = ChatOpenAI(model="gpt-4o", temperature=0.2)

    def _parse_scenarios(self, raw: str) -> list[TestScenario]:
        """Strips the markdown fences the model may wrap around JSON, then validates via Pydantic"""
        cleaned = re.sub(r"^```(?:json)?\s*|```\s*$", "", raw.strip())
        return [TestScenario(**s) for s in json.loads(cleaned)]

    def generate_from_user_story(
        self,
        user_story: str,
        acceptance_criteria: list[str],
        domain_context: str = ""
    ) -> list[TestScenario]:
        ac_text = "\n".join([f"- {ac}" for ac in acceptance_criteria])

        prompt = self.GENERATION_PROMPT.format(
            requirement=user_story + ("\n\nDomain context: " + domain_context if domain_context else ""),
            acceptance_criteria=ac_text
        )

        response = self.llm.invoke(prompt)
        return self._parse_scenarios(response.content)

    def generate_from_api_spec(self, openapi_spec: dict) -> list[TestScenario]:
        """Generates scenarios from OpenAPI specification"""
        scenarios = []
        http_methods = {"get", "post", "put", "patch", "delete", "head", "options"}
        for path, methods in openapi_spec.get("paths", {}).items():
            for method, spec in methods.items():
                # A path item can also carry shared keys ("parameters", "summary"); skip those
                if method.lower() not in http_methods:
                    continue
                scenarios.extend(self._generate_endpoint_scenarios(path, method, spec))
        return scenarios

    def _generate_endpoint_scenarios(
        self,
        path: str,
        method: str,
        spec: dict
    ) -> list[TestScenario]:
        prompt = f"""Create test scenarios for API endpoint.

Endpoint: {method.upper()} {path}
Description: {spec.get('summary', '')}
Parameters: {json.dumps(spec.get('parameters', []), indent=2)}
Request body: {json.dumps(spec.get('requestBody', {}), indent=2)}
Responses: {json.dumps(spec.get('responses', {}), indent=2)}

Scenarios:
- 200 (success) with valid data
- 400 (bad request) — invalid parameters
- 401/403 — authorization
- 404 — resource not found
- Boundary values for numeric parameters
- Endpoint-specific (based on description)

Return only a JSON array of TestScenario objects with fields: id, title, type, priority, preconditions, steps, expected_result, tags, related_requirement."""

        response = self.llm.invoke(prompt)
        return self._parse_scenarios(response.content)
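
A quick smoke test of the generator might look like this; the user story and acceptance criteria are illustrative, and the call assumes OPENAI_API_KEY is set in the environment.

generator = TestScenarioGenerator()

scenarios = generator.generate_from_user_story(
    user_story="As a customer, I want to reset my password via email",
    acceptance_criteria=[
        "Reset link is sent to the registered email address",
        "Reset link expires after 24 hours",
        "New password must satisfy the password policy",
    ],
)

# Print a compact summary of what the model produced
for s in scenarios:
    print(f"[{s.priority}] {s.type}: {s.title}")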

Integration with Jira + TestRail

import os

from jira import JIRA
import testrail  # Gurock's official testrail.py API binding

TESTRAIL_URL = os.environ["TESTRAIL_URL"]
TESTRAIL_USER = os.environ["TESTRAIL_USER"]
TESTRAIL_KEY = os.environ["TESTRAIL_KEY"]

class TestScenarioSync:
    def __init__(self, generator: TestScenarioGenerator):
        self.generator = generator

    def push_to_testrail(
        self,
        scenarios: list[TestScenario],
        section_id: int
    ):
        """Publishes scenarios to TestRail (add_case takes a section ID, not a suite ID)"""
        client = testrail.APIClient(TESTRAIL_URL)
        client.user = TESTRAIL_USER
        client.password = TESTRAIL_KEY

        for scenario in scenarios:
            client.send_post(
                f'add_case/{section_id}',
                {
                    'title': scenario.title,
                    'type_id': self._get_type_id(scenario.type),
                    'priority_id': self._get_priority_id(scenario.priority),
                    'custom_preconds': '\n'.join(scenario.preconditions),
                    'custom_steps': '\n'.join([
                        f"{i+1}. {step}"
                        for i, step in enumerate(scenario.steps)
                    ]),
                    'custom_expected': scenario.expected_result,
                    'refs': scenario.related_requirement
                }
            )

    def pull_from_jira_and_generate(
        self,
        jira_client: JIRA,
        sprint_id: str
    ) -> list[TestScenario]:
        """Gets user stories from the sprint and generates scenarios"""
        issues = jira_client.search_issues(
            f'sprint = {sprint_id} AND issuetype = Story AND status != Done'
        )
        all_scenarios = []
        for issue in issues:
            scenarios = self.generator.generate_from_user_story(
                user_story=issue.fields.summary,
                acceptance_criteria=self._extract_ac(issue.fields.description or ""),
            )
            all_scenarios.extend(scenarios)
        return all_scenarios

    def _extract_ac(self, description: str) -> list[str]:
        """Naive extraction: bullet lines from the issue description"""
        return [
            line.strip().lstrip("-* ")
            for line in description.splitlines()
            if line.strip().startswith(("-", "*"))
        ]

    def _get_type_id(self, scenario_type: str) -> int:
        # Case type IDs are installation-specific in TestRail; map to your own values
        return {"positive": 1, "negative": 2, "boundary": 3, "edge_case": 4}.get(scenario_type, 1)

    def _get_priority_id(self, priority: str) -> int:
        # TestRail defaults: 1 = Low, 2 = Medium, 3 = High, 4 = Critical
        return {"P1": 4, "P2": 3, "P3": 2}.get(priority, 2)
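
End-to-end, the sync might be driven like this; the Jira server URL, bot account, JIRA_TOKEN variable, sprint ID, and section ID are all placeholders for your own instance.

# Illustrative wiring: pull stories from a sprint, generate, push to TestRail
jira = JIRA(
    server="https://yourcompany.atlassian.net",
    basic_auth=("qa-bot@yourcompany.com", os.environ["JIRA_TOKEN"])
)

sync = TestScenarioSync(generator=TestScenarioGenerator())
scenarios = sync.pull_from_jira_and_generate(jira, sprint_id="1234")
sync.push_to_testrail(scenarios, section_id=42)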

Requirements Coverage Matrix

def build_coverage_matrix(
    requirements: list[dict],
    scenarios: list[TestScenario]
) -> dict:
    """Builds traceability matrix requirements → scenarios"""
    matrix = {}
    for req in requirements:
        req_id = req["id"]
        covered_scenarios = [
            s for s in scenarios
            if s.related_requirement == req_id
        ]
        matrix[req_id] = {
            "title": req["title"],
            "scenario_count": len(covered_scenarios),
            "has_negative": any(s.type == "negative" for s in covered_scenarios),
            "has_boundary": any(s.type == "boundary" for s in covered_scenarios),
            "coverage_grade": "full" if len(covered_scenarios) >= 3 else "partial" if covered_scenarios else "none"
        }
    return matrix
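
Run against the generated scenarios, the matrix doubles as a gap report; the requirement list here is illustrative.

requirements = [
    {"id": "REQ-101", "title": "Password reset via email"},
    {"id": "REQ-102", "title": "Account lockout after failed logins"},
]

matrix = build_coverage_matrix(requirements, scenarios)

# Flag everything that is not fully covered yet
for req_id, info in matrix.items():
    if info["coverage_grade"] != "full":
        print(
            f"{req_id} ({info['title']}): {info['coverage_grade']} coverage, "
            f"{info['scenario_count']} scenario(s), "
            f"negative={info['has_negative']}, boundary={info['has_boundary']}"
        )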

Case study: an ERP system with 200+ user stories per quarter. The QA team (4 people) couldn't get scenarios written before sprint start, so test planning ran in parallel with development. After the AI generator went live: 200 user stories → 1100 test scenarios in 2 hours, including review; QA time on scenario writing dropped from 3 days to 4 hours (review and correction), and P1 scenarios were 100% covered before development started.

Timeframe: a generator working from user stories plus Jira integration takes 2–3 weeks; adding TestRail/Xray sync and the traceability matrix extends it to 4–5 weeks.