8 changes: 4 additions & 4 deletions evap/contributor/templates/contributor_index.html
Member
(random line): What is the struggle with the 200x10 case? We would fetch 2000 evaluation instances, and we'd have to prefetch 200 courses, ~20 course types, ~10 programs, ~400 evaluations (the instances are already in memory), 1 semester, and ~200 responsibles. All of these are toy numbers. I suspect the total database query run time for the main query and all the prefetch queries is in the single- to two-digit milliseconds.

None of this justifies almost a minute of runtime. Touching on the discussion in #2235: for all of test_data.json, we render all result rows for all evaluations (thousands) in a few seconds. There is nothing fancy in the code for that. How can fetching and rendering a few hundred evaluations possibly be slower than that?

@@ -82,9 +82,9 @@
</tr>
{% endif %}
{% for evaluation in evaluations|dictsort:"name" %}
-<tr {% if evaluation|is_user_editor_or_delegate:user and evaluation.state == evaluation.State.PREPARED %}
+<tr {% if evaluation.user_is_editor_or_delegate and evaluation.state == evaluation.State.PREPARED %}
class="{% if course.evaluation_count > 1 %}evaluation-row{% else %}heading-row{% endif %} hover-row hover-row-info" data-url="{% url 'contributor:evaluation_edit' evaluation.id %}"
-{% elif evaluation.state == evaluation.State.PUBLISHED and evaluation|can_results_page_be_seen_by:user %}
+{% elif evaluation.state == evaluation.State.PUBLISHED and evaluation.user_can_see_results_page %}
class="{% if course.evaluation_count > 1 %}evaluation-row{% else %}heading-row{% endif %} hover-row results-row" data-url="{% url 'results:evaluation_detail' semester.id evaluation.id %}"
{% else %}
class="{% if course.evaluation_count > 1 %}evaluation-row{% else %}heading-row{% endif %}"
@@ -139,7 +139,7 @@
{% endif %}
<td class="text-end">
{% if evaluation.state != evaluation.State.PUBLISHED %}
-{% if evaluation|is_user_editor_or_delegate:user %}
+{% if evaluation.user_is_editor_or_delegate %}
{% if evaluation.state == evaluation.State.PREPARED %}
<a href="{% url 'contributor:evaluation_edit' evaluation.id %}" class="btn btn-primary btn-row-hover"
data-bs-toggle="tooltip" data-bs-placement="top" title="{% translate 'Edit or approve' %}">
@@ -175,7 +175,7 @@
</a>
{% endif %}
{% endif %}
-{% if evaluation|is_user_responsible_or_contributor_or_delegate:user %}
+{% if evaluation.user_is_responsible_or_contributor_or_delegate %}
<a href="{% url 'contributor:evaluation_preview' evaluation.id %}" class="btn btn-sm btn-light"
data-bs-toggle="tooltip" data-bs-placement="top" title="{% translate 'Preview' %}">
<span class="fas fa-eye"></span>
29 changes: 27 additions & 2 deletions evap/contributor/tests/test_views.py
@@ -6,6 +6,7 @@

from evap.evaluation.models import Contribution, Course, Evaluation, Questionnaire, UserProfile
from evap.evaluation.tests.tools import (
+    FuzzyInt,
    WebTest,
    WebTestWith200Check,
    create_evaluation_with_responsible_and_editor,
@@ -82,8 +83,32 @@ class TestContributorView(WebTestWith200Check):

    @classmethod
    def setUpTestData(cls):
-        users = create_evaluation_with_responsible_and_editor()
-        cls.test_users = [users["editor"], users["responsible"]]
+        result = create_evaluation_with_responsible_and_editor()
+        cls.responsible = result["responsible"]
+        cls.test_users = [result["editor"], cls.responsible]
+        cls.evaluation = result["evaluation"]
+
+    def test_num_queries_is_constant(self):
+        url = "/contributor/"
+        represented = baker.make(UserProfile, email="represented@example.com")
+        self.responsible.represented_users.add(represented)
+        evaluations = baker.make(
+            Evaluation,
+            name_en=iter(range(100)),
+            name_de=iter(range(100)),
+            state=Evaluation.State.PUBLISHED,
+            course__responsibles=[represented],
+            _quantity=100,
+            _bulk_create=True,
+        )
+        baker.make(
+            Contribution,
+            evaluation=iter(evaluations),
+            _quantity=100,
+            _bulk_create=True,
+        )
+        with self.assertNumQueries(FuzzyInt(0, 80)):
+            self.app.get(url, user=self.responsible)


class TestContributorEvaluationView(WebTestWith200Check):
24 changes: 21 additions & 3 deletions evap/contributor/views.py
@@ -58,8 +58,15 @@ def index(request):
    )

    own_evaluations = (
-        Evaluation.objects.filter(course__in=own_courses)
+        Evaluation.annotate_with_participant_and_voter_counts(Evaluation.objects.filter(course__in=own_courses))
        .annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user)))
+        .annotate(user_is_editor_or_delegate=Evaluation.user_is_editor_or_delegate_Q(user))
+        .annotate(
+            user_is_responsible_or_contributor_or_delegate=Evaluation.user_is_responsible_or_contributor_or_delegate_Q(
+                user
+            )
+        )
+        .annotate(user_can_see_results_page=Evaluation.can_results_page_be_seen_by_Q(user))
        .prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester")
    )
    own_evaluations = [evaluation for evaluation in own_evaluations if evaluation.can_be_seen_by(user)]
@@ -77,9 +84,20 @@
            )
        )
    )
-    delegated_evaluations = Evaluation.objects.filter(course__in=delegated_courses).prefetch_related(
-        "course", "course__evaluations", "course__programs", "course__type", "course__semester"
+    delegated_evaluations = (
+        Evaluation.annotate_with_participant_and_voter_counts(
+            Evaluation.objects.filter(course__in=delegated_courses)
+        )
+        .annotate(user_is_editor_or_delegate=Evaluation.user_is_editor_or_delegate_Q(user))
+        .annotate(
+            user_is_responsible_or_contributor_or_delegate=Evaluation.user_is_responsible_or_contributor_or_delegate_Q(
+                user
+            )
+        )
+        .annotate(user_can_see_results_page=Evaluation.can_results_page_be_seen_by_Q(user))
+        .prefetch_related("course", "course__evaluations", "course__programs", "course__type", "course__semester")
    )

    delegated_evaluations = [evaluation for evaluation in delegated_evaluations if evaluation.can_be_seen_by(user)]
    for evaluation in delegated_evaluations:
        evaluation.delegated_evaluation = True
52 changes: 52 additions & 0 deletions evap/evaluation/models.py
@@ -740,6 +740,24 @@ def can_be_seen_by(self, user):
            )
        return True

+    @staticmethod
+    def can_be_seen_by_Q(user):
+        if user.is_manager:
+            return Q()
+
+        if user.is_reviewer:
+            return ~Q(state=Evaluation.State.NEW) & ~Q(course__semester__results_are_archived=True)
+
+        base_q = ~Q(state=Evaluation.State.NEW)
+        if user.is_external:
+            return base_q & Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user) | Q(participants=user)
+
+        return base_q & (
+            ~Q(course__is_private=True)
+            | Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user)
+            | Q(participants=user)
+        )
Comment on lines +743 to +759
Member
There is no way to evaluate a Q instance against a Python model instance, right? If we can in any way avoid re-implementing this logic, I would prefer that. Hunting down subtle semantic-difference bugs between the SQL world and the Python world is on my not-to-do-list for this year.

One approach that should always work: fetch a list of instances (e.g. evaluations visible to the user) without any prefetching (should take a few milliseconds), then build a list of IDs in Python and pass that to follow-up queries (which then do the "heavy" prefetching).


    def can_results_page_be_seen_by(self, user):
        if user.is_manager:
            return True
@@ -751,6 +769,27 @@ def can_results_page_be_seen_by(self, user):
            return self.is_user_responsible_or_contributor_or_delegate(user)
        return self.can_be_seen_by(user)

+    @staticmethod
+    def can_results_page_be_seen_by_Q(user):
+        if user.is_manager:
+            return Q()
+
+        if user.is_reviewer:
+            return ~Q(course__semester__results_are_archived=True)
+
+        base_q = Q(state=Evaluation.State.PUBLISHED)
+        threshold = settings.VOTER_COUNT_NEEDED_FOR_PUBLISHING_RATING_RESULTS
+
+        archived_or_insufficient_q = Q(course__semester__results_are_archived=True) | Q(num_voters__lt=threshold)
+        restricted_q = (
+            base_q & archived_or_insufficient_q & Evaluation.user_is_responsible_or_contributor_or_delegate_Q(user)
+        )
+
+        not_archived_and_sufficient_q = ~Q(course__semester__results_are_archived=True) & Q(num_voters__gte=threshold)
+        available_q = base_q & not_archived_and_sufficient_q & Evaluation.can_be_seen_by_Q(user)
+
+        return restricted_q | available_q

    @property
    def can_reset_to_new(self):
        return Evaluation.State.PREPARED <= self.state <= Evaluation.State.REVIEWED
@@ -996,6 +1035,19 @@ def is_user_editor_or_delegate(self, user):
            or self.course.responsibles.filter(pk__in=represented_users).exists()
        )

+    @staticmethod
+    def user_is_editor_or_delegate_Q(user):
+        represented_users = user.represented_users.all() | UserProfile.objects.filter(pk=user.pk)
+        return Q(
+            Q(contributions__contributor__in=represented_users, contributions__role=Contribution.Role.EDITOR)
+            | Q(course__responsibles__in=represented_users)
+        )
+
+    @staticmethod
+    def user_is_responsible_or_contributor_or_delegate_Q(user):
+        represented_users = user.represented_users.all() | UserProfile.objects.filter(pk=user.pk)
+        return Q(Q(contributions__contributor__in=represented_users) | Q(course__responsibles__in=represented_users))

    def is_user_responsible_or_contributor_or_delegate(self, user):
        # early out that saves database hits since is_responsible_or_contributor_or_delegate is a cached_property
        if not user.is_responsible_or_contributor_or_delegate:
21 changes: 17 additions & 4 deletions evap/results/tools.py
@@ -3,6 +3,7 @@
from collections.abc import Iterable
from copy import copy
from enum import Enum
+from itertools import takewhile
from math import ceil, modf
from typing import TypeGuard, cast

@@ -340,17 +341,21 @@ def average_non_grade_rating_questions_distribution(results):
)


-def calculate_average_course_distribution(course, check_for_unpublished_evaluations=True):
+def calculate_average_course_distribution(course, check_for_unpublished_evaluations=True, annotated_evaluations=None):
    if check_for_unpublished_evaluations and course.evaluations.exclude(state=Evaluation.State.PUBLISHED).exists():
        return None
+    if annotated_evaluations is None:
+        annotated_evaluations = course.evaluations.all()
+    else:
+        annotated_evaluations = takewhile(lambda e: e.course == course, annotated_evaluations)

    return avg_distribution(
        [
            (
                calculate_average_distribution(evaluation),
                evaluation.weight,
            )
-            for evaluation in course.evaluations.all()
+            for evaluation in annotated_evaluations
        ]
    )

@@ -361,6 +366,12 @@ def get_evaluations_with_course_result_attributes(evaluations):
        .filter(Exists(Evaluation.objects.filter(course=OuterRef("pk")).exclude(state=Evaluation.State.PUBLISHED)))
        .values_list("id", flat=True)
    )
+    courses_without_unpublished_evaluations = Course.objects.filter(evaluations__in=evaluations).exclude(
+        id__in=courses_with_unpublished_evaluations
+    )
+    course_distribution_evaluations = Evaluation.annotate_with_participant_and_voter_counts(
+        Evaluation.objects.filter(course__in=courses_without_unpublished_evaluations)
+    ).order_by("course__id")

    course_id_evaluation_weight_sum_pairs = (
        Course.objects.annotate(Sum("evaluations__weight"))
@@ -370,12 +381,14 @@

    evaluation_weight_sum_per_course_id = {entry[0]: entry[1] for entry in course_id_evaluation_weight_sum_pairs}

-    for evaluation in evaluations:
+    for evaluation in sorted(evaluations, key=lambda e: e.course.id):
        if evaluation.course.id in courses_with_unpublished_evaluations:
            evaluation.course.not_all_evaluations_are_published = True
            evaluation.course.distribution = None
        else:
-            evaluation.course.distribution = calculate_average_course_distribution(evaluation.course, False)
+            evaluation.course.distribution = calculate_average_course_distribution(
+                evaluation.course, False, course_distribution_evaluations
+            )

        evaluation.course.evaluation_count = evaluation.course.evaluations.count()
        evaluation.course.avg_grade = distribution_to_grade(evaluation.course.distribution)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -28,7 +28,7 @@ dev = [
"django-debug-toolbar~=6.0",
"django-stubs~=6.0.2",
"django-webtest~=1.9.13",
"model-bakery~=1.23.3",
"model-bakery~=1.23.4",
"mypy~=1.20.0",
"openpyxl-stubs~=0.1.25",
"pylint-django~=2.7.0",
8 changes: 4 additions & 4 deletions uv.lock

Some generated files are not rendered by default.