AI Tutor for VR Training
VR training simulators without an adaptive component are just expensive videos with controls. A learner goes through the same scenario regardless of mistakes: they make an error on step 3, but the scenario continues anyway. An AI tutor changes the logic: the system tracks every action, identifies error patterns, adapts the next scenario, and provides personalized real-time feedback.
Adaptive Tutor Architecture
import json
import math
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional

from langchain_openai import ChatOpenAI
@dataclass
class LearnerProfile:
    """Mutable per-learner state updated by the tutor after every action."""

    learner_id: str
    # Raw log of past sessions; per-entry schema is defined by the caller — TODO confirm.
    session_history: list[dict] = field(default_factory=list)
    skill_scores: dict = field(default_factory=dict)  # skill → 0.0–1.0
    # Step ids the learner has previously failed (used to detect repeated errors).
    common_errors: list[str] = field(default_factory=list)
    learning_pace: float = 1.0  # learning speed coefficient
@dataclass
class VRAction:
    """A single learner action reported by the VR client."""

    action_type: str  # step_completed, error, timeout, skip
    step_id: str
    # Client-side timestamp; unit (epoch vs. scene time) depends on the sender — TODO confirm.
    timestamp: float
    details: dict  # specific action parameters
    correct: bool
class AdaptiveTutor:
    """Adaptive tutoring engine for a VR simulator.

    Tracks per-skill mastery on a LearnerProfile, records repeated errors,
    selects the next scenario (remedial / standard / advancement), and asks
    the LLM for a short piece of voice feedback after every action.
    """

    # Reference template for external prompt tooling, filled via str.format().
    # The original embedded a Python-like conditional expression inside the
    # template, which str.format cannot evaluate; callers now pass the
    # pre-built {error_line} (empty string when the action was correct).
    FEEDBACK_PROMPT = """You are an AI tutor in a VR training simulator. Provide brief (1–2 sentences) voice feedback.
The learner just: {action_description}
Was it executed correctly: {is_correct}
{error_line}
Learner's error history: {error_history}
Tone: supportive, specific. Don't praise for obvious actions.
If the error repeats — explain the principle, don't just say "incorrect"."""

    def __init__(self, llm: ChatOpenAI, skill_map: dict | None = None):
        """
        Args:
            llm: chat model used for feedback generation (must support ainvoke).
            skill_map: optional explicit step_id -> skill mapping. When omitted,
                the skill name is derived from the step id prefix
                (see _get_skill_for_step).
        """
        self.llm = llm
        self._skill_map = skill_map

    async def analyze_action(self, action: VRAction, profile: LearnerProfile) -> dict:
        """Process one VR action: update the profile and plan the next step.

        Async because feedback comes from the LLM: the original version was
        sync and called the async _generate_feedback without awaiting it,
        returning a coroutine object in the "feedback" field.

        Returns:
            dict with keys "feedback" (str), "next_scenario" (dict) and
            "skill_update" (float, post-update score of the affected skill).
        """
        # Update skill score
        skill = self._get_skill_for_step(action.step_id)
        if skill:
            current = profile.skill_scores.get(skill, 0.5)
            if action.correct:
                # Small gain on success, larger loss on error: mastery is
                # earned slowly and lost quickly.
                profile.skill_scores[skill] = min(1.0, current + 0.05)
            else:
                profile.skill_scores[skill] = max(0.0, current - 0.1)
                # Record the failed step once. The original compared
                # action.action_type against a list of step ids, so the
                # duplicate check could never match what was appended.
                if action.step_id not in profile.common_errors:
                    profile.common_errors.append(action.step_id)
        # Adapt scenario
        next_scenario = self._select_next_scenario(profile)
        return {
            "feedback": await self._generate_feedback(action, profile),
            "next_scenario": next_scenario,
            "skill_update": profile.skill_scores.get(skill, 0.5),
        }

    def _get_skill_for_step(self, step_id: str) -> Optional[str]:
        """Map a scenario step to the skill it exercises.

        Uses the explicit skill_map when provided; otherwise assumes step ids
        follow a "<skill>_<n>" convention and returns the prefix.
        TODO(review): this method was called but never defined in the original
        class — confirm the step-id convention against real scenario data.
        """
        if self._skill_map is not None:
            return self._skill_map.get(step_id)
        return step_id.rsplit("_", 1)[0] if "_" in step_id else step_id

    def _select_next_scenario(self, profile: LearnerProfile) -> dict:
        """Selects next scenario based on learner profile."""
        weak_skills = [
            skill for skill, score in profile.skill_scores.items()
            if score < 0.6
        ]
        if weak_skills:
            # Focus on weak areas
            return {
                "type": "remedial",
                "target_skills": weak_skills[:2],
                "difficulty": "easier",
                "reason": f"Requires practice: {', '.join(weak_skills[:2])}"
            }
        elif profile.skill_scores and all(s > 0.8 for s in profile.skill_scores.values()):
            # The extra truthiness check guards an empty profile: all() over an
            # empty dict is True, which would advance a brand-new learner.
            return {
                "type": "advancement",
                "difficulty": "harder",
                "reason": "Good mastery of basic skills"
            }
        else:
            return {"type": "standard", "difficulty": "normal"}

    async def _generate_feedback(
        self,
        action: VRAction,
        profile: LearnerProfile
    ) -> str:
        """Ask the LLM for one sentence of voice feedback on the action."""
        repeated_error = action.step_id in profile.common_errors
        prompt = f"""Provide feedback to the learner in VR.
Action: {action.action_type} on step {action.step_id}
Correct: {action.correct}
Repeating error: {repeated_error}
Error details: {json.dumps(action.details, ensure_ascii=False)}
{"This error repeats. Explain the principle, not just that it was incorrect." if repeated_error else ""}
1 sentence. Use informal "you". Be specific."""
        result = await self.llm.ainvoke(prompt)
        return result.content
Competency Assessment and Progress Tracking
class CompetencyTracker:
    """Tracks competency mastery using IRT (Item Response Theory).

    Note: this class uses ``math.exp`` — the original file never imported
    ``math``, so ``update_ability`` raised NameError at runtime; the import
    is now added at the top of the file.
    """

    def update_ability(
        self,
        learner_theta: float,   # current ability level (-3 to +3)
        item_difficulty: float, # task difficulty (-3 to +3)
        correct: bool
    ) -> float:
        """Updates the ability estimate using the 3PL IRT model.

        Args:
            learner_theta: current ability estimate on the IRT scale.
            item_difficulty: difficulty of the attempted task, same scale.
            correct: whether the learner answered correctly.

        Returns:
            Updated theta, clamped to the conventional [-3, 3] range.
        """
        # 3-parameter logistic model
        a = 1.7   # discrimination
        c = 0.25  # guessing parameter
        # Probability of correct response under the 3PL model
        p = c + (1 - c) / (1 + math.exp(-a * (learner_theta - item_difficulty)))
        # Fisher information at theta; more informative items produce
        # smaller steps. The +0.01 below guards against division by ~0.
        info = a**2 * (p - c)**2 * (1 - p) / ((1 - c)**2 * p)
        # Fixed-gain gradient step: up on a correct answer, down otherwise.
        delta = 0.3 * (1 if correct else -1) / (info + 0.01)
        return max(-3, min(3, learner_theta + delta))
Unity: Integrating Tutor into Scene
// Unity-side bridge between VR interaction events and the adaptive tutor backend:
// packages each completed step as a VRAction, fetches feedback/next scenario,
// plays synthesized voice feedback, and loads the adapted scenario.
public class VRTutorController : MonoBehaviour
{
    private AdaptiveTutorClient tutorClient;  // remote client for the tutor service
    private AudioSource feedbackAudio;        // plays the synthesized feedback clip

    // Called by the interaction layer whenever the learner finishes a step.
    // NOTE(review): async void is the usual shape for Unity event handlers, but
    // exceptions thrown here are unobservable — consider try/catch with logging.
    public async void OnActionCompleted(string stepId, bool isCorrect, Dictionary<string, object> details)
    {
        var action = new VRAction(
            actionType: isCorrect ? "step_completed" : "error",
            stepId: stepId,
            timestamp: Time.time, // seconds since scene start (Unity scene clock)
            details: details,
            correct: isCorrect
        );
        // NOTE(review): currentLearnerId is not declared in this snippet —
        // presumably a field assigned elsewhere; verify before shipping.
        var result = await tutorClient.AnalyzeAction(action, currentLearnerId);
        // Play voice feedback
        string feedbackText = result.feedback;
        byte[] audio = await TTSService.Synthesize(feedbackText);
        feedbackAudio.PlayOneShot(AudioService.BytesToClip(audio));
        // Adapt next scenario
        ScenarioManager.LoadScenario(result.nextScenario);
    }
}
Case study: training operators at a petrochemical plant to handle emergency equipment. 200 operators, 12 required competencies. Before implementation: fixed scenario, everyone proceeds the same way, average final test score — 71%. After 3 months with AI tutor (personalized repetition of weak areas, adaptive difficulty): average score increased to 84%, time to certification level reduced from 8 to 5 sessions.
Timeframe: basic tutor with feedback and progress tracking: 6–8 weeks; adaptive engine with IRT and analytics: 3–4 months.







