diff --git a/controllers/auth.py b/controllers/auth.py
index 2220ed50d..9d179f4dd 100644
--- a/controllers/auth.py
+++ b/controllers/auth.py
@@ -233,11 +233,18 @@ def create_user_token() -> dict:
 
 
 def handle_login_change(old_user):
+    from models.counters.helpers import update_user_activity
     if old_user != g.user:
         flask_security.utils.logout_user()
         flask_security.utils.login_user(g.user, remember=True)
+        # Track login activity
+        if g.user and g.user.id:
+            update_user_activity(g.user.id, 'login')
     elif not old_user:
         flask_security.utils.login_user(g.user, remember=True)
+        # Track login activity
+        if g.user and g.user.id:
+            update_user_activity(g.user.id, 'login')
 
 
 def load_jwt_user(user_id, claims):
diff --git a/controllers/endpoints/blockpy.py b/controllers/endpoints/blockpy.py
index c923f9e7f..f13abafb1 100644
--- a/controllers/endpoints/blockpy.py
+++ b/controllers/endpoints/blockpy.py
@@ -39,6 +39,8 @@ from common.highlighters import highlight_python_code
 from tasks.tasks import queue_lti_post_grade
 from models.user import User
+from models.counters.helpers import (update_edit_time, update_run_count, update_error_counts,
+                                     increment_submission_count, update_user_activity)
 
 blueprint_blockpy = Blueprint('blockpy', __name__, url_prefix='/blockpy')
 
@@ -258,6 +260,10 @@ def save_student_file(filename, course_id, user):
         make_log_entry(submission.id, submission.version, submission.assignment_id, submission.assignment_version,
                        course_id, submission.user_id, "File.Edit", filename + ("#" + part_id if part_id else ""),
                        message=new_code)
+
+    # Update user activity timestamp
+    update_user_activity(submission.user_id, 'edit')
+
     return ajax_success({"version_change": version_change})
 
@@ -327,6 +333,19 @@ def log_event():
 
     # Make the entry
     new_log = make_log_entry(submission_id, submission_version, assignment_id, assignment_version,
                              course_id, user_id, event_type, file_path, category, label, message)
+
+    # Update counts based on event type
+    if event_type == 'X-run' or event_type == 'Run':
+        update_run_count(submission_id)
+    elif event_type == 'feedback':
+        # Track error types based on feedback category
+        if 'syntax' in category.lower() or 'syntax' in label.lower():
+            update_error_counts(submission_id, 'syntax')
+        elif 'runtime' in category.lower() or 'runtime' in label.lower():
+            update_error_counts(submission_id, 'runtime')
+        elif 'instructor' in category.lower() or 'test' in category.lower():
+            update_error_counts(submission_id, 'instructor_test')
+
     return ajax_success({"log_id": new_log.id})
diff --git a/models/assignment.py b/models/assignment.py
index fc1cd68f2..76ce5745e 100644
--- a/models/assignment.py
+++ b/models/assignment.py
@@ -89,6 +89,7 @@ class Assignment(EnhancedBase):
     submissions: Mapped[list["Submission"]] = db.relationship(back_populates="assignment")
     memberships: Mapped[list["AssignmentGroupMembership"]] = db.relationship(back_populates="assignment")
     reports: Mapped[list["Report"]] = db.relationship(back_populates="assignment")
+    counts: Mapped[Optional["AssignmentCounts"]] = db.relationship(back_populates="assignment", uselist=False)
 
     __table_args__ = (Index("assignment_url_index", "url"),
                       Index('assignment_course_index', "course_id"))
@@ -216,6 +217,7 @@ def get_available() -> 'List[Tuple[models.Assignment, models.AssignmentGroup]]':
     @staticmethod
     def new(owner_id, course_id, type="blockpy", name=None, level=None, url=None) -> 'models.Assignment':
         """ Create a new Assignment for the course and owner.
""" + from models.counters.helpers import increment_course_assignment_count if name is None: name = 'Untitled' assignment = Assignment(owner_id=owner_id, course_id=maybe_int(course_id), @@ -223,6 +225,8 @@ def new(owner_id, course_id, type="blockpy", name=None, level=None, url=None) -> type=type, name=level if type == 'maze' else name) db.session.add(assignment) db.session.commit() + # Track assignment creation in course counts + increment_course_assignment_count(maybe_int(course_id)) return assignment def move_course(self, new_course_id: int): diff --git a/models/counters/README.md b/models/counters/README.md new file mode 100644 index 000000000..d4d31abdf --- /dev/null +++ b/models/counters/README.md @@ -0,0 +1,162 @@ +# Counts Tables Documentation + +## Overview + +The counts tables provide an efficient way to track usage statistics and metrics across the BlockPy server. Instead of querying historical log data every time statistics are needed, these tables maintain running sums, averages, and other aggregated metrics that are updated in real-time as events occur. + +## Architecture + +The counts system consists of four main tables: + +### 1. SubmissionCounts +Tracks statistics for individual submissions. + +**Fields:** +- `runs` - Number of times the submission has been run +- `average_edit_time` - Running average of time between edits (in seconds) +- `average_attempt_time` - Running average of time between submission attempts +- `estimated_time_spent` - Estimated total time spent on the submission +- `syntax_errors` - Count of syntax errors encountered +- `runtime_errors` - Count of runtime errors encountered +- `failed_instructor_tests` - Count of failed instructor test cases + +### 2. AssignmentCounts +Tracks statistics for assignments. + +**Fields:** +- `total_submissions` - Total number of submissions for this assignment +- `date_last_submission` - Timestamp of the most recent submission + +### 3. CourseCounts +Tracks statistics for courses. + +**Fields:** +- `total_submissions` - Total number of submissions in the course +- `total_assignments` - Total number of assignments in the course +- `total_assignment_groups` - Total number of assignment groups +- `total_users` - Total number of users enrolled in the course +- `total_students` - Total number of students in the course +- `total_instructors` - Total number of instructors in the course +- `date_last_user` - Timestamp when the last user was added +- `date_last_submission` - Timestamp of the most recent submission +- `date_last_assignment` - Timestamp when the last assignment was created + +### 4. UserCounts +Tracks statistics for users. 
+
+## Helper Functions
+
+The `models/counters/helpers.py` module provides functions for updating counts:
+
+### Ensure Functions
+These functions ensure that a counts record exists for a given entity, creating one if necessary:
+- `ensure_submission_counts(submission_id)`
+- `ensure_assignment_counts(assignment_id)`
+- `ensure_course_counts(course_id)`
+- `ensure_user_counts(user_id)`
+
+### Update Functions
+These functions update specific metrics:
+
+- `update_edit_time(submission_id, time_delta)` - Updates the running average edit time (Welford-style incremental mean)
+- `update_run_count(submission_id)` - Increments the run count
+- `update_error_counts(submission_id, error_type)` - Increments error counts by type
+- `increment_submission_count(assignment_id, course_id, user_id)` - Updates submission counts across all relevant tables
+- `update_user_activity(user_id, activity_type)` - Updates user login/edit timestamps
+- `increment_course_assignment_count(course_id)` - Increments the assignment count for a course
+- `increment_course_user_count(course_id, role)` - Increments user counts for a course based on role
+
+### Recalculation Function
+- `recalculate_submission_counts_from_logs(submission_id)` - Recalculates counts from historical log data
+
+This is useful for:
+- Backfilling counts for existing data
+- Fixing discrepancies
+- Initial population of counts tables
+
+## Integration Points
+
+The counts tracking is automatically integrated at the following points:
+
+### Submission Events
+- **Creation** (`models/submission.py:from_assignment`) - Increments submission counts
+- **Code Save** (`controllers/endpoints/blockpy.py:save_student_file`) - Updates the user's edit timestamp
+- **Run Event** (`controllers/endpoints/blockpy.py:log_event`) - Increments the run count
+- **Feedback Event** (`controllers/endpoints/blockpy.py:log_event`) - Tracks error counts
+
+### Assignment Events
+- **Creation** (`models/assignment.py:new`) - Increments the course assignment count
+
+### Course & User Events
+- **Course Creation** (`models/course.py:new`) - Tracks the instructor addition
+- **Role Addition** (`models/user.py:add_role`, `update_roles`) - Tracks user enrollment in courses
+- **Login** (`controllers/auth.py:handle_login_change`) - Updates the last login timestamp
+
+## Running Statistics Algorithm
+
+The average edit time is maintained with the incremental running-mean update from Welford's online algorithm:
+
+```python
+new_avg = old_avg + (new_value - old_avg) / count
+```
+
+This allows us to maintain accurate averages without storing all historical data points, which is critical for performance with large datasets.
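+
+To make the update concrete, the self-contained snippet below applies the same step to a few edit gaps and also carries the variance accumulator from the full Welford algorithm (the variance part is purely illustrative; only the running mean is stored in the counts tables today):
+
+```python
+def welford_update(count, mean, m2, new_value):
+    """One step of Welford's online algorithm.
+
+    Returns the updated (count, mean, m2); the sample variance is m2 / (count - 1).
+    The mean update is what update_edit_time performs; m2 illustrates the
+    standard-deviation extension listed under Future Enhancements.
+    """
+    count += 1
+    delta = new_value - mean
+    mean += delta / count                 # same update as update_edit_time
+    m2 += delta * (new_value - mean)      # extra accumulator for variance
+    return count, mean, m2
+
+
+# Example: gaps of 10s, 20s, and 30s between edits
+count, mean, m2 = 0, 0.0, 0.0
+for gap in (10.0, 20.0, 30.0):
+    count, mean, m2 = welford_update(count, mean, m2, gap)
+print(mean)              # 20.0
+print(m2 / (count - 1))  # 100.0 (sample variance)
+```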
+
+## Performance Benefits
+
+By maintaining these counts:
+
+1. **Instant Statistics** - No need to count millions of log entries
+2. **Reduced Database Load** - Avoid expensive aggregation queries
+3. **Scalability** - Constant-time updates regardless of historical data size
+4. **Real-time Insights** - Statistics are always up-to-date
+
+## Usage Example
+
+```python
+from models.counters.helpers import ensure_submission_counts
+
+# Get counts for a submission
+counts = ensure_submission_counts(submission_id)
+
+# Access statistics
+print(f"Runs: {counts.runs}")
+print(f"Average edit time: {counts.average_edit_time} seconds")
+print(f"Syntax errors: {counts.syntax_errors}")
+```
+
+## Backfilling Historical Data
+
+To populate counts for existing data:
+
+```python
+from models.counters.helpers import recalculate_submission_counts_from_logs
+from models.submission import Submission
+
+# For a single submission
+recalculate_submission_counts_from_logs(submission_id)
+
+# For all submissions (in a management command)
+for submission in Submission.query.all():
+    recalculate_submission_counts_from_logs(submission.id)
+```
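+
+On a large installation the loop above is easiest to run as a Flask CLI command, so it executes inside the application context. The sketch below is illustrative only; the command name and where it gets registered are placeholders, not part of this change:
+
+```python
+# Hypothetical management command for backfilling counts.
+import click
+from flask.cli import with_appcontext
+
+from models.counters.helpers import recalculate_submission_counts_from_logs
+from models.submission import Submission
+
+
+@click.command('backfill-counts')
+@with_appcontext
+def backfill_counts():
+    """Recalculate SubmissionCounts for every existing submission."""
+    total = 0
+    # Stream submissions in batches instead of loading them all at once.
+    for submission in Submission.query.yield_per(100):
+        recalculate_submission_counts_from_logs(submission.id)
+        total += 1
+    click.echo(f"Recalculated counts for {total} submissions")
+
+
+# Registration (e.g., in the app factory): app.cli.add_command(backfill_counts)
+```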
+
+## Future Enhancements
+
+Potential future additions to the counts system:
+
+1. **Standard Deviation Tracking** - Using Welford's algorithm for variance
+2. **Percentile Tracking** - Using t-digest or similar algorithms
+3. **Time-series Data** - Daily/weekly aggregates for trend analysis
+4. **Assignment Group Counts** - Similar to other entities
+5. **Report Counts** - Tracking report generation and usage
diff --git a/models/counters/helpers.py b/models/counters/helpers.py
new file mode 100644
index 000000000..568d7c980
--- /dev/null
+++ b/models/counters/helpers.py
@@ -0,0 +1,294 @@
+"""
+Helper functions for updating counts tables with running statistics.
+
+These functions implement incremental algorithms for computing running sums and averages
+without needing to recompute from all historical data.
+"""
+from datetime import datetime
+from typing import Optional
+from sqlalchemy_utc import utcnow
+from sqlalchemy import func, or_
+
+from models.generics.models import db
+import models
+
+
+def ensure_submission_counts(submission_id: int) -> "models.SubmissionCounts":
+    """
+    Ensure a SubmissionCounts record exists for the given submission.
+    Creates one if it doesn't exist.
+
+    Args:
+        submission_id: The ID of the submission
+
+    Returns:
+        The SubmissionCounts object
+    """
+    from models.counters.submission_counts import SubmissionCounts
+
+    counts = SubmissionCounts.query.filter_by(submission_id=submission_id).first()
+    if counts is None:
+        counts = SubmissionCounts(submission_id=submission_id)
+        db.session.add(counts)
+        db.session.commit()
+    return counts
+
+
+def ensure_assignment_counts(assignment_id: int) -> "models.AssignmentCounts":
+    """
+    Ensure an AssignmentCounts record exists for the given assignment.
+    Creates one if it doesn't exist.
+
+    Args:
+        assignment_id: The ID of the assignment
+
+    Returns:
+        The AssignmentCounts object
+    """
+    from models.counters.assignment_counts import AssignmentCounts
+
+    counts = AssignmentCounts.query.filter_by(assignment_id=assignment_id).first()
+    if counts is None:
+        counts = AssignmentCounts(assignment_id=assignment_id)
+        db.session.add(counts)
+        db.session.commit()
+    return counts
+
+
+def ensure_course_counts(course_id: int) -> "models.CourseCounts":
+    """
+    Ensure a CourseCounts record exists for the given course.
+    Creates one if it doesn't exist.
+
+    Args:
+        course_id: The ID of the course
+
+    Returns:
+        The CourseCounts object
+    """
+    from models.counters.course_counts import CourseCounts
+
+    counts = CourseCounts.query.filter_by(course_id=course_id).first()
+    if counts is None:
+        counts = CourseCounts(course_id=course_id)
+        db.session.add(counts)
+        db.session.commit()
+    return counts
+
+
+def ensure_user_counts(user_id: int) -> "models.UserCounts":
+    """
+    Ensure a UserCounts record exists for the given user.
+    Creates one if it doesn't exist.
+
+    Args:
+        user_id: The ID of the user
+
+    Returns:
+        The UserCounts object
+    """
+    from models.counters.user_counts import UserCounts
+
+    counts = UserCounts.query.filter_by(user_id=user_id).first()
+    if counts is None:
+        counts = UserCounts(user_id=user_id)
+        db.session.add(counts)
+        db.session.commit()
+    return counts
+
+
+def update_edit_time(submission_id: int, time_delta: float):
+    """
+    Update the average edit time for a submission using Welford's online algorithm.
+
+    This computes the running average without storing all historical values.
+
+    Args:
+        submission_id: The ID of the submission
+        time_delta: The time in seconds since the last edit
+    """
+    counts = ensure_submission_counts(submission_id)
+
+    # Increment the run count (we use runs as a proxy for edits)
+    counts.runs += 1
+
+    # Update average edit time using Welford's algorithm
+    # new_avg = old_avg + (new_value - old_avg) / count
+    if counts.average_edit_time is None:
+        counts.average_edit_time = time_delta
+    else:
+        counts.average_edit_time = counts.average_edit_time + (time_delta - counts.average_edit_time) / counts.runs
+
+    db.session.commit()
+
+
+def update_run_count(submission_id: int):
+    """
+    Increment the run count for a submission.
+
+    Args:
+        submission_id: The ID of the submission
+    """
+    counts = ensure_submission_counts(submission_id)
+    counts.runs += 1
+    db.session.commit()
+
+
+def update_error_counts(submission_id: int, error_type: str):
+    """
+    Update error counts for a submission based on feedback type.
+
+    Args:
+        submission_id: The ID of the submission
+        error_type: The type of error (syntax, runtime, or instructor_test)
+    """
+    counts = ensure_submission_counts(submission_id)
+
+    if error_type == 'syntax':
+        counts.syntax_errors += 1
+    elif error_type == 'runtime':
+        counts.runtime_errors += 1
+    elif error_type == 'instructor_test':
+        counts.failed_instructor_tests += 1
+
+    db.session.commit()
+
+
+def increment_submission_count(assignment_id: int, course_id: int, user_id: int):
+    """
+    Increment submission counts across assignment, course, and user.
+
+    Args:
+        assignment_id: The ID of the assignment
+        course_id: The ID of the course
+        user_id: The ID of the user
+    """
+    # Update assignment counts
+    assignment_counts = ensure_assignment_counts(assignment_id)
+    assignment_counts.total_submissions += 1
+    assignment_counts.date_last_submission = utcnow()
+
+    # Update course counts
+    course_counts = ensure_course_counts(course_id)
+    course_counts.total_submissions += 1
+    course_counts.date_last_submission = utcnow()
+
+    # Update user counts
+    user_counts = ensure_user_counts(user_id)
+    user_counts.total_submissions += 1
+
+    db.session.commit()
+
+
+def update_user_activity(user_id: int, activity_type: str):
+    """
+    Update user activity timestamps.
+
+    Args:
+        user_id: The ID of the user
+        activity_type: The type of activity ('login' or 'edit')
+    """
+    user_counts = ensure_user_counts(user_id)
+
+    if activity_type == 'login':
+        user_counts.last_logged_in = utcnow()
+    elif activity_type == 'edit':
+        user_counts.last_edited = utcnow()
+
+    db.session.commit()
+
+
+def increment_course_assignment_count(course_id: int):
+    """
+    Increment the assignment count for a course.
+
+    Args:
+        course_id: The ID of the course
+    """
+    course_counts = ensure_course_counts(course_id)
+    course_counts.total_assignments += 1
+    course_counts.date_last_assignment = utcnow()
+    db.session.commit()
+
+
+def increment_course_user_count(course_id: int, role: str):
+    """
+    Increment user counts for a course based on role.
+
+    Args:
+        course_id: The ID of the course
+        role: The role of the user (student, learner, or instructor)
+    """
+    course_counts = ensure_course_counts(course_id)
+    course_counts.total_users += 1
+    course_counts.date_last_user = utcnow()
+
+    # Map role names to count fields
+    role_lower = role.lower()
+    if role_lower in ('student', 'learner'):
+        course_counts.total_students += 1
+    elif role_lower == 'instructor':
+        course_counts.total_instructors += 1
+
+    db.session.commit()
+
+
+def recalculate_submission_counts_from_logs(submission_id: int):
+    """
+    Recalculate submission counts from historical log data.
+    This is useful for backfilling counts or fixing discrepancies.
+
+    Note: This function uses ILIKE queries which may not be fully indexed.
+    This is acceptable since this function is intended for one-time backfilling
+    or occasional recalculation, not for frequent real-time use.
+
+    Args:
+        submission_id: The ID of the submission
+    """
+    from models.log_tables import SubmissionLog
+
+    counts = ensure_submission_counts(submission_id)
+
+    # Count runs
+    run_count = SubmissionLog.query.filter(
+        SubmissionLog.submission_id == submission_id,
+        SubmissionLog.event_type.in_(['X-run', 'Run'])
+    ).count()
+    counts.runs = run_count
+
+    # Count errors (using ILIKE for pattern matching - acceptable for backfilling)
+    syntax_errors = SubmissionLog.query.filter(
+        SubmissionLog.submission_id == submission_id,
+        SubmissionLog.event_type == 'feedback'
+    ).filter(
+        or_(
+            SubmissionLog.category.ilike('%syntax%'),
+            SubmissionLog.label.ilike('%syntax%')
+        )
+    ).count()
+    counts.syntax_errors = syntax_errors
+
+    runtime_errors = SubmissionLog.query.filter(
+        SubmissionLog.submission_id == submission_id,
+        SubmissionLog.event_type == 'feedback'
+    ).filter(
+        or_(
+            SubmissionLog.category.ilike('%runtime%'),
+            SubmissionLog.label.ilike('%runtime%')
+        )
+    ).count()
+    counts.runtime_errors = runtime_errors
+
+    instructor_test_failures = SubmissionLog.query.filter(
+        SubmissionLog.submission_id == submission_id,
+        SubmissionLog.event_type == 'feedback'
+    ).filter(
+        or_(
+            SubmissionLog.category.ilike('%instructor%'),
+            SubmissionLog.category.ilike('%test%')
+        )
+    ).count()
+    counts.failed_instructor_tests = instructor_test_failures
+
+    db.session.commit()
+    return counts
diff --git a/models/course.py b/models/course.py
index 5c70cc366..cdf17dc3f 100644
--- a/models/course.py
+++ b/models/course.py
@@ -53,6 +53,7 @@ class Course(Base):
     submissions: Mapped[list["Submission"]] = db.relationship(back_populates="course")
     invites: Mapped[list["Invite"]] = db.relationship(back_populates="course")
     reports: Mapped[list["Report"]] = db.relationship(back_populates="course")
+    counts: Mapped[Optional["CourseCounts"]] = db.relationship(back_populates="course", uselist=False)
 
     __table_args__ = (Index('course_url_index', "url"),)
 
@@ -311,6 +312,7 @@ def edit(self, name=None, url=None, visibility=None, term=None, settings=None):
 
     @staticmethod
     def new(name, owner_id, visibility, term, url):
+        from models.counters.helpers import increment_course_user_count
         if visibility and isinstance(visibility.lower(), CourseVisibility):
             visibility = visibility.lower()
         else:
@@ -325,6 +327,8 @@ def new(name, owner_id, visibility, term, url):
         new_role = models.Role(name='instructor', user_id=owner_id, course_id=new_course.id)
         db.session.add(new_role)
         db.session.commit()
+        # Track instructor addition to course
+        increment_course_user_count(new_course.id, 'instructor')
         return new_course
 
     @staticmethod
diff --git a/models/submission.py b/models/submission.py
index 994b300e7..3850e76ad 100644
--- a/models/submission.py
+++ b/models/submission.py
@@ -72,6 +72,7 @@ class Submission(EnhancedBase):
     reviews: Mapped[list["Review"]] = db.relationship(back_populates="submission")
     grade_history: Mapped[list["GradeHistory"]] = db.relationship(back_populates="submission")
     submission_logs: Mapped[list["SubmissionLog"]] = db.relationship(back_populates="submission")
+    submission_counts: Mapped[Optional["SubmissionCounts"]] = db.relationship(back_populates="submission", uselist=False)
 
     __table_args__ = (Index('submission_index', "course_id", "assignment_id", "user_id"),
 
@@ -320,6 +321,7 @@ def get_reviews_db(self):
     @staticmethod
     def from_assignment(assignment, user_id, course_id, assignment_group_id=None):
+        from models.counters.helpers import increment_submission_count
         submission = Submission(assignment_id=assignment.id,
                                 user_id=user_id,
                                 assignment_group_id=assignment_group_id,
@@ -332,6 +334,8 @@ def from_assignment(assignment, user_id, course_id, assignment_group_id=None):
         # TODO: Log extra starting files!
         SubmissionLog.new(submission.id, submission.version, assignment.id, assignment.version, course_id, user_id,
                           "File.Create", "answer.py", "", "", assignment.starting_code, "", "")
+        # Track submission counts
+        increment_submission_count(assignment.id, course_id, user_id)
         return submission
 
     @staticmethod
diff --git a/models/user.py b/models/user.py
index 8d02415d5..bb291d1c1 100644
--- a/models/user.py
+++ b/models/user.py
@@ -58,6 +58,7 @@ class User(Base, UserMixin):
     invites: Mapped[list["Invite"]] = db.relationship(back_populates="user", foreign_keys="Invite.user_id")
     approvals: Mapped[list["Invite"]] = db.relationship(back_populates="approver", foreign_keys="Invite.approver_id")
     grade_history: Mapped[list["GradeHistory"]] = db.relationship(back_populates="grader")
+    user_counts: Mapped[Optional["UserCounts"]] = db.relationship(back_populates="user", uselist=False)
 
     def encode_json(self, use_owner=True):
         return {
@@ -234,15 +235,19 @@ def is_test_user(self, course_id=None):
 
     ### Adding and updating roles ###
 
     def add_role(self, name, course_id):
+        from models.counters.helpers import increment_course_user_count
         if name in [id for id, _ in USER_DISPLAY_ROLES.items()]:
             new_role = models.Role(name=name, user_id=self.id, course_id=maybe_int(course_id))
             db.session.add(new_role)
             db.session.commit()
+            # Track user addition to course
+            increment_course_user_count(maybe_int(course_id), name.lower())
             return new_role
         return None
 
     def update_roles(self, new_roles, course_id):
+        from models.counters.helpers import increment_course_user_count
         old_roles = [role for role in self.roles if role.course_id == maybe_int(course_id)]
         new_role_names = set(new_role_name.lower() for new_role_name in new_roles)
         for old_role in old_roles:
@@ -253,6 +258,8 @@ def update_roles(self, new_roles, course_id):
             if new_role_name.lower() not in old_role_names:
                 new_role = models.Role(name=new_role_name.lower(), user_id=self.id, course_id=maybe_int(course_id))
                 db.session.add(new_role)
+                # Track user addition to course
+                increment_course_user_count(maybe_int(course_id), new_role_name.lower())
         db.session.commit()
 
     def determine_role(self, assignments, submissions):
diff --git a/tests/test_counts.py b/tests/test_counts.py
new file mode 100644
index 000000000..54d28be97
--- /dev/null
+++ b/tests/test_counts.py
@@ -0,0 +1,97 @@
+"""
+Basic tests for counts tracking functionality.
+
+This tests the helper functions that track statistics in the counts tables.
+""" +import unittest +import sys +import os + +# Add the parent directory to the path +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + + +class TestCountsHelpers(unittest.TestCase): + """Test the counts helper functions.""" + + def test_imports(self): + """Test that the counts helper module imports correctly.""" + try: + from models.counters import helpers + self.assertTrue(hasattr(helpers, 'ensure_submission_counts')) + self.assertTrue(hasattr(helpers, 'ensure_assignment_counts')) + self.assertTrue(hasattr(helpers, 'ensure_course_counts')) + self.assertTrue(hasattr(helpers, 'ensure_user_counts')) + self.assertTrue(hasattr(helpers, 'update_edit_time')) + self.assertTrue(hasattr(helpers, 'update_run_count')) + self.assertTrue(hasattr(helpers, 'update_error_counts')) + self.assertTrue(hasattr(helpers, 'increment_submission_count')) + self.assertTrue(hasattr(helpers, 'update_user_activity')) + self.assertTrue(hasattr(helpers, 'increment_course_assignment_count')) + self.assertTrue(hasattr(helpers, 'increment_course_user_count')) + except ImportError as e: + self.fail(f"Failed to import counts helpers: {e}") + + def test_model_relationships(self): + """Test that model relationships exist for counts tables.""" + try: + from models.submission import Submission + from models.assignment import Assignment + from models.course import Course + from models.user import User + + # Check that the relationships are defined + # Note: We can't instantiate without a database, but we can check the class attributes + self.assertTrue(hasattr(Submission, 'submission_counts')) + self.assertTrue(hasattr(Assignment, 'counts')) + self.assertTrue(hasattr(Course, 'counts')) + self.assertTrue(hasattr(User, 'user_counts')) + except Exception as e: + self.fail(f"Failed to verify model relationships: {e}") + + +class TestWelfordAlgorithm(unittest.TestCase): + """Test the Welford's algorithm implementation for running averages.""" + + def test_running_average_calculation(self): + """Test that the running average calculation is correct.""" + # Simulate the algorithm used in update_edit_time + values = [10.0, 20.0, 30.0, 40.0, 50.0] + + avg = None + count = 0 + for value in values: + count += 1 + if avg is None: + avg = value + else: + avg = avg + (value - avg) / count + + # The average should be 30.0 + self.assertAlmostEqual(avg, 30.0, places=2) + + def test_running_average_single_value(self): + """Test running average with a single value.""" + value = 42.0 + avg = value + self.assertEqual(avg, 42.0) + + def test_running_average_stability(self): + """Test that running average is stable with many values.""" + values = [float(i) for i in range(1, 101)] # 1 to 100 + + avg = None + count = 0 + for value in values: + count += 1 + if avg is None: + avg = value + else: + avg = avg + (value - avg) / count + + # The average of 1 to 100 should be 50.5 + self.assertAlmostEqual(avg, 50.5, places=2) + + +if __name__ == '__main__': + unittest.main()