From 94bd66f9b6ef03791b8d483802560c83c1584094 Mon Sep 17 00:00:00 2001 From: Jonas Dittrich Date: Mon, 13 Apr 2026 21:04:12 +0200 Subject: [PATCH 1/5] prefetch participant and voter counts in contributor index --- evap/contributor/views.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/evap/contributor/views.py b/evap/contributor/views.py index 9fcac49b35..e845b295d1 100644 --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -58,7 +58,7 @@ def index(request): ) own_evaluations = ( - Evaluation.objects.filter(course__in=own_courses) + Evaluation.annotate_with_participant_and_voter_counts(Evaluation.objects.filter(course__in=own_courses)) .annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user))) .prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester") ) @@ -77,9 +77,10 @@ def index(request): ) ) ) - delegated_evaluations = Evaluation.objects.filter(course__in=delegated_courses).prefetch_related( - "course", "course__evaluations", "course__programs", "course__type", "course__semester" - ) + delegated_evaluations = Evaluation.annotate_with_participant_and_voter_counts( + Evaluation.objects.filter(course__in=delegated_courses) + ).prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester", "course__responsibles") + delegated_evaluations = [evaluation for evaluation in delegated_evaluations if evaluation.can_be_seen_by(user)] for evaluation in delegated_evaluations: evaluation.delegated_evaluation = True From 61ffe81b5ac47d0d9f78d5a4796f0a708af6e459 Mon Sep 17 00:00:00 2001 From: Jonas Dittrich Date: Wed, 29 Apr 2026 17:05:06 +0200 Subject: [PATCH 2/5] allow passing annotated_evaluations to calculate_average_course_distribution POC that is nlogn in evaluations. 
Can be improved by smarter evaluation sorting --- evap/results/tools.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/evap/results/tools.py b/evap/results/tools.py index 6fdf8ae064..ca6289e011 100644 --- a/evap/results/tools.py +++ b/evap/results/tools.py @@ -3,6 +3,7 @@ from collections.abc import Iterable from copy import copy from enum import Enum +from itertools import takewhile from math import ceil, modf from typing import TypeGuard, cast @@ -340,9 +341,13 @@ def average_non_grade_rating_questions_distribution(results): ) -def calculate_average_course_distribution(course, check_for_unpublished_evaluations=True): +def calculate_average_course_distribution(course, check_for_unpublished_evaluations=True, annotated_evaluations=None): if check_for_unpublished_evaluations and course.evaluations.exclude(state=Evaluation.State.PUBLISHED).exists(): return None + if annotated_evaluations is None: + annotated_evaluations = course.evaluations.all() + else: + annotated_evaluations = [e for e in annotated_evaluations if e.course_id == course.id] return avg_distribution( [ @@ -350,7 +355,7 @@ def calculate_average_course_distribution(course, check_for_unpublished_evaluati calculate_average_distribution(evaluation), evaluation.weight, ) - for evaluation in course.evaluations.all() + for evaluation in annotated_evaluations ] ) @@ -361,6 +366,12 @@ def get_evaluations_with_course_result_attributes(evaluations): .filter(Exists(Evaluation.objects.filter(course=OuterRef("pk")).exclude(state=Evaluation.State.PUBLISHED))) .values_list("id", flat=True) ) + courses_without_unpublished_evaluations = Course.objects.filter(evaluations__in=evaluations).exclude( + id__in=courses_with_unpublished_evaluations + ) + course_distribution_evaluations = Evaluation.annotate_with_participant_and_voter_counts( + Evaluation.objects.filter(course__in=courses_without_unpublished_evaluations) + ).order_by("course__id") course_id_evaluation_weight_sum_pairs = (
Course.objects.annotate(Sum("evaluations__weight")) @@ -370,12 +381,14 @@ def get_evaluations_with_course_result_attributes(evaluations): evaluation_weight_sum_per_course_id = {entry[0]: entry[1] for entry in course_id_evaluation_weight_sum_pairs} - for evaluation in evaluations: + for evaluation in sorted(evaluations, key=lambda e: e.course.id): if evaluation.course.id in courses_with_unpublished_evaluations: evaluation.course.not_all_evaluations_are_published = True evaluation.course.distribution = None else: - evaluation.course.distribution = calculate_average_course_distribution(evaluation.course, False) + evaluation.course.distribution = calculate_average_course_distribution( + evaluation.course, False, course_distribution_evaluations + ) evaluation.course.evaluation_count = evaluation.course.evaluations.count() evaluation.course.avg_grade = distribution_to_grade(evaluation.course.distribution) From ab818d46fc7e02fc9ae89df09dfc497b3e23081c Mon Sep 17 00:00:00 2001 From: Jonas Dittrich Date: Wed, 29 Apr 2026 17:02:54 +0200 Subject: [PATCH 3/5] reimplement user category with Q lookup --- .../templates/contributor_index.html | 8 +-- evap/contributor/views.py | 23 ++++++-- evap/evaluation/models.py | 52 +++++++++++++++++++ 3 files changed, 76 insertions(+), 7 deletions(-) diff --git a/evap/contributor/templates/contributor_index.html b/evap/contributor/templates/contributor_index.html index 4f228d7b87..e9aed0e23d 100644 --- a/evap/contributor/templates/contributor_index.html +++ b/evap/contributor/templates/contributor_index.html @@ -82,9 +82,9 @@ {% endif %} {% for evaluation in evaluations|dictsort:"name" %} - {% if evaluation.state != evaluation.State.PUBLISHED %} - {% if evaluation|is_user_editor_or_delegate:user %} + {% if evaluation.user_is_editor_or_delegate %} {% if evaluation.state == evaluation.State.PREPARED %} @@ -175,7 +175,7 @@ {% endif %} {% endif %} - {% if evaluation|is_user_responsible_or_contributor_or_delegate:user %} + {% if 
evaluation.user_is_responsible_or_contributor_or_delegate %} diff --git a/evap/contributor/views.py b/evap/contributor/views.py index e845b295d1..7df8696a26 100644 --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -60,6 +60,13 @@ def index(request): own_evaluations = ( Evaluation.annotate_with_participant_and_voter_counts(Evaluation.objects.filter(course__in=own_courses)) .annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user))) + .annotate(user_is_editor_or_delegate=Evaluation.user_is_editor_or_delegate_Q(user)) + .annotate( + user_is_responsible_or_contributor_or_delegate=Evaluation.user_is_responsible_or_contributor_or_delegate_Q( + user + ) + ) + .annotate(user_can_see_results_page=Evaluation.can_results_page_be_seen_by_Q(user)) .prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester") ) own_evaluations = [evaluation for evaluation in own_evaluations if evaluation.can_be_seen_by(user)] @@ -77,9 +84,19 @@ def index(request): ) ) ) - delegated_evaluations = Evaluation.annotate_with_participant_and_voter_counts( - Evaluation.objects.filter(course__in=delegated_courses) - ).prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester", "course__responsibles") + delegated_evaluations = ( + Evaluation.annotate_with_participant_and_voter_counts( + Evaluation.objects.filter(course__in=delegated_courses) + ) + .annotate(user_is_editor_or_delegate=Evaluation.user_is_editor_or_delegate_Q(user)) + .annotate( + user_is_responsible_or_contributor_or_delegate=Evaluation.user_is_responsible_or_contributor_or_delegate_Q( + user + ) + ) + .annotate(user_can_see_results_page=Evaluation.can_results_page_be_seen_by_Q(user)) + .prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester") + ) delegated_evaluations = [evaluation for evaluation in delegated_evaluations if 
evaluation.can_be_seen_by(user)] for evaluation in delegated_evaluations: diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index 2d5e2e6aea..29c27c18ce 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -740,6 +740,24 @@ def can_be_seen_by(self, user): ) return True + @staticmethod + def can_be_seen_by_Q(user): + if user.is_manager: + return Q() + + if user.is_reviewer: + return ~Q(state=Evaluation.State.NEW) & ~Q(course__semester__results_are_archived=True) + + base_q = ~Q(state=Evaluation.State.NEW) + if user.is_external: + return base_q & (Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user) | Q(participants=user)) + + return base_q & ( + ~Q(course__is_private=True) + | Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user) + | Q(participants=user) + ) + def can_results_page_be_seen_by(self, user): if user.is_manager: return True @@ -751,6 +769,27 @@ def can_results_page_be_seen_by(self, user): return self.is_user_responsible_or_contributor_or_delegate(user) return self.can_be_seen_by(user) + @staticmethod + def can_results_page_be_seen_by_Q(user): + if user.is_manager: + return Q() + + if user.is_reviewer: + return ~Q(course__semester__results_are_archived=True) + + base_q = Q(state=Evaluation.State.PUBLISHED) + threshold = settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS + + archived_or_insufficient_q = Q(course__semester__results_are_archived=True) | Q(num_voters__lt=threshold) + restricted_q = ( + base_q & archived_or_insufficient_q & Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user) + ) + + not_archived_and_sufficient_q = ~Q(course__semester__results_are_archived=True) & Q(num_voters__gte=threshold) + available_q = base_q & not_archived_and_sufficient_q & Evaluation.can_be_seen_by_Q(user) + + return restricted_q | available_q + @property def can_reset_to_new(self): return Evaluation.State.PREPARED <= self.state <= Evaluation.State.REVIEWED @@ -996,6 +1035,19 @@ def
is_user_editor_or_delegate(self, user): or self.course.responsibles.filter(pk__in=represented_users).exists() ) + @staticmethod + def user_is_editor_or_delegate_Q(user): + represented_users = user.represented_users.all() | UserProfile.objects.filter(pk=user.pk) + return Q( + Q(contributions__contributor__in=represented_users, contributions__role=Contribution.Role.EDITOR) + | Q(course__responsibles__in=represented_users) + ) + + @staticmethod + def user_is_responsible_or_contributor_or_delegate_Q(user): + represented_users = user.represented_users.all() | UserProfile.objects.filter(pk=user.pk) + return Q(Q(contributions__contributor__in=represented_users) | Q(course__responsibles__in=represented_users)) + def is_user_responsible_or_contributor_or_delegate(self, user): # early out that saves database hits since is_responsible_or_contributor_or_delegate is a cached_property if not user.is_responsible_or_contributor_or_delegate: From f31e0c852f7f339d7cb70d0cac4b3bc3b7e67537 Mon Sep 17 00:00:00 2001 From: Jonas Dittrich Date: Mon, 13 Apr 2026 21:04:51 +0200 Subject: [PATCH 4/5] update model bakery for bugfix --- pyproject.toml | 2 +- uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f100a74578..83300192e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dev = [ "django-debug-toolbar~=6.0", "django-stubs~=6.0.2", "django-webtest~=1.9.13", - "model-bakery~=1.23.3", + "model-bakery~=1.23.4", "mypy~=1.20.0", "openpyxl-stubs~=0.1.25", "pylint-django~=2.7.0", diff --git a/uv.lock b/uv.lock index 7b2c687674..abf8c9fe78 100644 --- a/uv.lock +++ b/uv.lock @@ -457,7 +457,7 @@ dev = [ { name = "django-debug-toolbar", specifier = "~=6.0" }, { name = "django-stubs", specifier = "~=6.0.2" }, { name = "django-webtest", specifier = "~=1.9.13" }, - { name = "model-bakery", specifier = "~=1.23.3" }, + { name = "model-bakery", specifier = "~=1.23.4" }, { name = "mypy", specifier = "~=1.20.0" }, { name 
= "openpyxl-stubs", specifier = "~=0.1.25" }, { name = "pylint", specifier = "~=4.0.4" }, @@ -595,14 +595,14 @@ wheels = [ [[package]] name = "model-bakery" -version = "1.23.3" +version = "1.23.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "django" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/88/34/cdac0f2565d34cd73c02b79e3f03c8d9bd4b14084f790e28ab85abb3d245/model_bakery-1.23.3.tar.gz", hash = "sha256:cb94b30eeaf6300336bbfed21f30dbaa01d68e3799e792f60d06e937d5ce46c8", size = 23247, upload-time = "2026-02-13T16:47:01.536Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/51/d3cfd4ab5c1cb9f889c604dc96cc0f1d7a52972ee28fe7a44a9dd705521a/model_bakery-1.23.4.tar.gz", hash = "sha256:0cd8e958e229734bd41feffdcb513a0f079c79c9ddb2e0aad11ed2c23f6d43d5", size = 23812, upload-time = "2026-03-27T09:28:39.75Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6c/70/d2c827e2fb9aee0844da298d65e6df4d42a7bf182592c116af51037904f1/model_bakery-1.23.3-py3-none-any.whl", hash = "sha256:3c378fad570d64b8b15f6cb6acb4f589b0f70d4df823d23257720ba381e18fd5", size = 25436, upload-time = "2026-02-13T16:47:00.408Z" }, + { url = "https://files.pythonhosted.org/packages/95/88/55c8e0a873e7a0f8e0d6c5ca236512cc37555632be178e7ba72bfbe0a619/model_bakery-1.23.4-py3-none-any.whl", hash = "sha256:65bfa0405d3ea60aca8c21e7e184c6617d6c8d32d0493cc75232a4476f471514", size = 25984, upload-time = "2026-03-27T09:28:38.439Z" }, ] [[package]] From dfa092390fa50bb619bfc67529b408eb59202b43 Mon Sep 17 00:00:00 2001 From: Jonas Dittrich Date: Mon, 13 Apr 2026 21:07:07 +0200 Subject: [PATCH 5/5] add constant query test for contributor index --- evap/contributor/tests/test_views.py | 29 ++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/evap/contributor/tests/test_views.py b/evap/contributor/tests/test_views.py index 4db2451b10..c7b27f041e 100644 --- a/evap/contributor/tests/test_views.py +++ 
b/evap/contributor/tests/test_views.py @@ -6,6 +6,7 @@ from evap.evaluation.models import Contribution, Course, Evaluation, Questionnaire, UserProfile from evap.evaluation.tests.tools import ( + FuzzyInt, WebTest, WebTestWith200Check, create_evaluation_with_responsible_and_editor, @@ -82,8 +83,32 @@ class TestContributorView(WebTestWith200Check): @classmethod def setUpTestData(cls): - users = create_evaluation_with_responsible_and_editor() - cls.test_users = [users["editor"], users["responsible"]] + result = create_evaluation_with_responsible_and_editor() + cls.responsible = result["responsible"] + cls.test_users = [result["editor"], cls.responsible] + cls.evaluation = result["evaluation"] + + def test_num_queries_is_constant(self): + url = "/contributor/" + represented = baker.make(UserProfile, email="represented@example.com") + self.responsible.represented_users.add(represented) + evaluations = baker.make( + Evaluation, + name_en=iter(range(100)), + name_de=iter(range(100)), + state=Evaluation.State.PUBLISHED, + course__responsibles=[represented], + _quantity=100, + _bulk_create=True, + ) + baker.make( + Contribution, + evaluation=iter(evaluations), + _quantity=100, + _bulk_create=True, + ) + with self.assertNumQueries(FuzzyInt(0, 80)): + self.app.get(url, user=self.responsible) class TestContributorEvaluationView(WebTestWith200Check):