Skip to content
← All Examples
🎓

Education: Learning Analytics

A learning analytics system where causal tracing distinguishes whether an AI tutor intervention actually caused a student's improvement, or whether the student improved independently.

Important
Education research struggles with the distinction between correlation and causation. A student who received tutoring and improved might have improved anyway. PyRapide's causal graph makes the causal chain explicit and auditable.

Architecture Overview

  • LearningPlatform is the student-facing LMS, emitting lesson starts, exercise submissions, hint requests, and completions.
  • AITutor is the adaptive tutoring engine that intervenes with scaffolding, adjusts difficulty, and updates learning paths.
  • AssessmentEngine generates quizzes, evaluates results, and certifies mastery.

Interfaces

learning_interfaces.py python
1from pyrapide import interface, action, module, when
2from pyrapide import architecture, connect, Pattern, Engine
3from pyrapide import must_match, never
4import asyncio
5
# ── Interfaces ──────────────────────────────────────────

@interface
class LearningPlatform:
    """Student-facing learning management system.

    Emits the raw student activity stream: lesson starts, exercise
    submissions, hint requests, and lesson completions.
    """

    @action
    async def lesson_started(self, student_id: str, lesson_id: str,
                             subject: str) -> None: ...

    @action
    async def exercise_submitted(self, student_id: str, lesson_id: str,
                                 answer: str, time_spent_s: float) -> None: ...

    @action
    async def lesson_completed(self, student_id: str, lesson_id: str,
                               score: float) -> None: ...

    @action
    async def hint_requested(self, student_id: str, lesson_id: str,
                             exercise_id: str) -> None: ...
23
@interface
class AITutor:
    """Adaptive AI tutoring engine.

    Each action carries a human-readable ``reason`` so tutor behavior
    stays explainable and auditable.
    """

    @action
    async def intervention(self, student_id: str, type: str,
                           content: str, reason: str) -> None: ...
    # NOTE(review): `type` shadows the builtin; kept because callers pass
    # it as a keyword argument — renaming would break the interface.

    @action
    async def difficulty_adjusted(self, student_id: str, lesson_id: str,
                                  new_level: str, reason: str) -> None: ...

    @action
    async def learning_path_updated(self, student_id: str,
                                    next_lessons: list[str]) -> None: ...
36
@interface
class AssessmentEngine:
    """Formal assessment and mastery evaluation.

    Generates quizzes, records their results, and certifies mastery.
    """

    @action
    async def quiz_generated(self, student_id: str, topic: str,
                             difficulty: str) -> None: ...

    @action
    async def quiz_result(self, student_id: str, topic: str,
                          score: float, mastery: bool) -> None: ...

    @action
    async def mastery_certified(self, student_id: str,
                                topic: str, level: str) -> None: ...

Module Logic

learning_modules.py python
# ── Modules ─────────────────────────────────────────────

@module(implements=AITutor)
class AdaptiveTutor:
    """Reactive implementation of the AITutor interface.

    Adapts to student struggle signals, answers hint requests, and
    advances the learning path on demonstrated mastery.
    """

    @when("LearningPlatform.exercise_submitted")
    async def evaluate_and_adapt(self, event):
        # More than 300 s on one exercise is treated as a struggle signal;
        # anything faster needs no adaptation.
        data = event.data
        if data["time_spent_s"] <= 300:
            return
        await self.intervention(
            student_id=data["student_id"],
            type="scaffolding",
            content="Let me break this down step by step...",
            reason=f"Student spent {data['time_spent_s']}s on exercise",
        )
        await self.difficulty_adjusted(
            student_id=data["student_id"],
            lesson_id=data["lesson_id"],
            new_level="easier",
            reason="Extended time on exercise indicates difficulty",
        )

    @when("LearningPlatform.hint_requested")
    async def provide_hint(self, event):
        # Every explicit help request receives a hint intervention.
        await self.intervention(
            student_id=event.data["student_id"],
            type="hint",
            content="Consider the relationship between...",
            reason="Student requested help",
        )

    @when("AssessmentEngine.quiz_result")
    async def update_path(self, event):
        # Only a mastery-level quiz result unlocks the advanced lessons.
        data = event.data
        if not data["mastery"]:
            return
        await self.learning_path_updated(
            student_id=data["student_id"],
            next_lessons=["advanced_topic_1", "advanced_topic_2"],
        )
39
@module(implements=AssessmentEngine)
class MasteryAssessor:
    """Implements AssessmentEngine: quizzes follow strong lesson scores."""

    @when("LearningPlatform.lesson_completed")
    async def generate_assessment(self, event):
        # A completion score below 0.7 does not warrant a formal quiz.
        data = event.data
        if data["score"] < 0.7:
            return
        await self.quiz_generated(
            student_id=data["student_id"],
            topic=data["lesson_id"],
            difficulty="standard",
        )

Architecture and Constraints

  • intervention_must_have_reason: every AI action must be explainable.
  • must_attempt_exercise: lessons cannot be completed without student engagement.
  • mastery_requires_quiz: mastery certification requires formal assessment.
  • limit_difficulty_reduction: forbids three consecutive difficulty reductions without an intervening quiz result, preventing runaway difficulty lowering.
learning_architecture.py python
# ── Architecture ────────────────────────────────────────

@architecture
class LearningAnalyticsSystem:
    """Wires platform, tutor, and assessment together and declares the
    auditability constraints on the event flow."""

    platform: LearningPlatform
    tutor: AITutor
    assessment: AssessmentEngine

    def connections(self):
        """Event routing between the three components."""
        routes = [
            # All student activity is visible to the AI tutor.
            connect(Pattern.match("LearningPlatform.*"), "tutor"),
            # A completed lesson kicks off a formal assessment.
            connect(Pattern.match("LearningPlatform.lesson_completed"),
                    "assessment"),
            # Quiz outcomes flow back so the tutor can adapt.
            connect(Pattern.match("AssessmentEngine.quiz_result"), "tutor"),
            # Difficulty changes are pushed back to the platform.
            connect(Pattern.match("AITutor.difficulty_adjusted"), "platform"),
        ]
        return routes

    def constraints(self):
        """Invariants every run must satisfy."""
        # No unexplained behavioral change: every intervention is justified.
        explainable = must_match(
            trigger="AITutor.intervention",
            condition=lambda e: len(e.data["reason"]) > 0,
            name="intervention_must_have_reason",
        )
        # A started lesson may not complete without an exercise submission.
        engagement = never(
            pattern=("LearningPlatform.lesson_started",
                     "LearningPlatform.lesson_completed"),
            unless="LearningPlatform.exercise_submitted",
            name="must_attempt_exercise",
        )
        # A mastery-level quiz result must lead to certification.
        certified = must_match(
            trigger="AssessmentEngine.quiz_result",
            response="AssessmentEngine.mastery_certified",
            condition=lambda e: e.data["mastery"] is True,
            name="mastery_requires_quiz",
        )
        # Never three consecutive "easier" adjustments without a new quiz
        # result in between — prevents runaway difficulty lowering.
        bounded = never(
            pattern=("AITutor.difficulty_adjusted",) * 3,
            condition=lambda e1, e2, e3: all(
                e.data["new_level"] == "easier" for e in (e1, e2, e3)
            ),
            unless="AssessmentEngine.quiz_result",
            name="limit_difficulty_reduction",
        )
        return [explainable, engagement, certified, bounded]

Execution and Causal Analysis

learning_analysis.py python
# ── Execute and Analyze Learning Outcomes ───────────────

async def main():
    """Run the learning system, then audit which AI interventions
    causally contributed to each mastery certification."""
    engine = Engine()
    computation = await engine.run(LearningAnalyticsSystem)

    from pyrapide import (
        root_causes, backward_slice, forward_slice,
        causal_distance, common_ancestors
    )

    # For each mastery certification, trace the learning journey.
    certs = [e for e in computation.events
             if e.name == "AssessmentEngine.mastery_certified"]

    # BUG FIX: the original read `interventions` AFTER this loop, so the
    # causal analysis only covered the last certification's journey and
    # raised NameError when there were no certifications at all.
    # Accumulate interventions across every journey instead.
    all_interventions = []
    for cert in certs:
        journey = backward_slice(computation, cert)
        interventions = [e for e in journey
                         if e.name == "AITutor.intervention"]
        all_interventions.extend(interventions)
        print(f"Student {cert.data['student_id']} achieved mastery:")
        print(f"  Journey length: {len(journey)} events")
        print(f"  AI interventions: {len(interventions)}")

    # Distinguish correlation from causation: did the AI intervention
    # CAUSE the improvement, or did the student improve independently?
    for intervention in all_interventions:
        impact = forward_slice(computation, intervention)
        improved = [e for e in impact
                    if e.name == "AssessmentEngine.quiz_result"
                    and e.data["mastery"]]
        if improved:
            dist = causal_distance(computation, intervention, improved[0])
            print(f"  Intervention -> mastery: {dist} causal steps")

asyncio.run(main())
💡 Tip
The key insight: forward_slice from an AI intervention shows everything that intervention causally affected. If a mastery certification is in the forward slice, the intervention caused the improvement. If it is not, the improvement was independent.