From 8d1cc334e3e802cfa3869cbd64653a75ce05dbef Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 19 May 2025 18:16:58 +0200
Subject: [PATCH 01/20] remove private
---
evap/evaluation/models.py | 9 ---------
evap/evaluation/tests/test_models.py | 1 -
evap/results/tools.py | 4 ++--
.../templates/staff_evaluation_textanswers_section.html | 3 ---
evap/staff/views.py | 1 -
5 files changed, 2 insertions(+), 16 deletions(-)
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py
index e558def6f3..96629227fa 100644
--- a/evap/evaluation/models.py
+++ b/evap/evaluation/models.py
@@ -1628,7 +1628,6 @@ class ReviewDecision(models.TextChoices):
"""
PUBLIC = "PU", _("public")
- PRIVATE = "PR", _("private") # This answer should only be displayed to the contributor the question was about
DELETED = "DE", _("deleted")
UNDECIDED = "UN", _("undecided")
@@ -1658,10 +1657,6 @@ class Meta:
def will_be_deleted(self):
return self.review_decision == self.ReviewDecision.DELETED
- @property
- def will_be_private(self):
- return self.review_decision == self.ReviewDecision.PRIVATE
-
@property
def will_be_public(self):
return self.review_decision == self.ReviewDecision.PUBLIC
@@ -1672,10 +1667,6 @@ def will_be_public(self):
def is_public(self):
return self.will_be_public
- @property
- def is_private(self):
- return self.will_be_private
-
@property
def is_reviewed(self):
return self.review_decision != self.ReviewDecision.UNDECIDED
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py
index 7f3751a4cc..8dca0d7306 100644
--- a/evap/evaluation/tests/test_models.py
+++ b/evap/evaluation/tests/test_models.py
@@ -386,7 +386,6 @@ def test_textanswers_to_delete_get_deleted_on_publish(self):
[
TextAnswer.ReviewDecision.DELETED,
TextAnswer.ReviewDecision.PUBLIC,
- TextAnswer.ReviewDecision.PRIVATE,
]
),
_quantity=3,
diff --git a/evap/results/tools.py b/evap/results/tools.py
index 6fdf8ae064..29a5726f42 100644
--- a/evap/results/tools.py
+++ b/evap/results/tools.py
@@ -231,7 +231,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool =
((textanswer.contribution_id, textanswer.assignment_id), textanswer)
for contribution in evaluation.contributions.all()
for textanswer in contribution.textanswer_set.all()
- if textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC]
+ if textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC
)
racs_per_contribution_assignment: dict[tuple[int, int], list[RatingAnswerCounter]] = unordered_groupby(
@@ -483,7 +483,7 @@ def can_textanswer_be_seen_by( # noqa: PLR0911,PLR0912
view_general_results: ViewGeneralResults,
view_contributor_results: ViewContributorResults,
) -> bool:
- assert textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC]
+ assert textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC
contributor = textanswer.contribution.contributor
# NOTE: when changing this behavior, make sure all changes are also reflected in results.tools.textanswers_visible_to
diff --git a/evap/staff/templates/staff_evaluation_textanswers_section.html b/evap/staff/templates/staff_evaluation_textanswers_section.html
index 1a296e5b41..d6ef8c0942 100644
--- a/evap/staff/templates/staff_evaluation_textanswers_section.html
+++ b/evap/staff/templates/staff_evaluation_textanswers_section.html
@@ -39,9 +39,6 @@
-
-
-
diff --git a/evap/staff/views.py b/evap/staff/views.py
index 137e8baf3b..4666bca574 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -1740,7 +1740,6 @@ def evaluation_textanswers_update_publish(request):
review_decision_for_action = {
"publish": TextAnswer.ReviewDecision.PUBLIC,
- "make_private": TextAnswer.ReviewDecision.PRIVATE,
"delete": TextAnswer.ReviewDecision.DELETED,
"unreview": TextAnswer.ReviewDecision.UNDECIDED,
}
From 65c39dea30492afcca017a94c9e4f8af574553d4 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 4 Nov 2024 21:53:07 +0100
Subject: [PATCH 02/20] Add average results to exporter
---
evap/results/exporters.py | 148 ++++++++++++++++++++-------
evap/results/tests/test_exporters.py | 79 ++++++++------
evap/staff/views.py | 6 +-
3 files changed, 162 insertions(+), 71 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index bc48e4d746..0d29caebd0 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -114,24 +114,29 @@ def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Que
return filtered_questions
@staticmethod
- def filter_evaluations(
- semesters: Iterable[Semester],
- evaluation_states: Iterable[Evaluation.State],
- program_ids: Iterable[int],
- course_type_ids: Iterable[int],
- contributor: UserProfile | None,
- include_not_enough_voters: bool,
+ def filter_evaluations( # noqa: PLR0912
+ semesters: Iterable[Semester] | None = None,
+ evaluation_states: Iterable[Evaluation.State] | None = None,
+ program_ids: Iterable[int] | None = None,
+ course_type_ids: Iterable[int] | None = None,
+ contributor: UserProfile | None = None,
+ include_not_enough_voters: bool = False,
) -> tuple[list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], list[Questionnaire], bool]:
# pylint: disable=too-many-locals
course_results_exist = False
evaluations_with_results = []
used_questionnaires: set[Questionnaire] = set()
- evaluations_filter = Q(
- course__semester__in=semesters,
- state__in=evaluation_states,
- course__programs__in=program_ids,
- course__type__in=course_type_ids,
- )
+
+ evaluations_filter = Q()
+ if semesters:
+ evaluations_filter &= Q(course__semester__in=semesters)
+ if evaluation_states:
+ evaluations_filter &= Q(state__in=evaluation_states)
+ if program_ids:
+ evaluations_filter &= Q(course__programs__in=program_ids)
+ if course_type_ids:
+ evaluations_filter &= Q(course__type__in=course_type_ids)
+
if contributor:
evaluations_filter = evaluations_filter & (
Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor])
@@ -198,6 +203,8 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
+ self.write_cell("Average for this Question", "evaluation")
+
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
if len(semesters) > 1:
@@ -208,17 +215,19 @@ def write_headings_and_evaluation_info(
self.next_row()
self.write_cell(_("Programs"), "bold")
+ self.write_cell("", "program")
for evaluation, __ in evaluations_with_results:
self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program")
self.next_row()
self.write_cell(_("Course Type"), "bold")
+ self.write_cell("", "program")
for evaluation, __ in evaluations_with_results:
self.write_cell(evaluation.course.type.name, "border_left_right")
self.next_row()
# One more cell is needed for the question column
- self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))
+ self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1))
def write_overall_results(
self,
@@ -228,14 +237,17 @@ def write_overall_results(
annotated_evaluations = [e for e, __ in evaluations_with_results]
self.write_cell(_("Overall Average Grade"), "bold")
+ self.write_cell("", "border_left_right")
averages = (distribution_to_grade(calculate_average_distribution(e)) for e in annotated_evaluations)
self.write_row(averages, lambda avg: self.grade_to_style(avg) if avg else "border_left_right")
self.write_cell(_("Total voters/Total participants"), "bold")
+ self.write_cell("", "total_voters")
voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in annotated_evaluations)
self.write_row(voter_ratios, style="total_voters")
self.write_cell(_("Evaluation rate"), "bold")
+ self.write_cell("", "evaluation_rate")
# round down like in progress bar
participant_percentages = (
f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%"
@@ -249,17 +261,19 @@ def write_overall_results(
# Borders only if there is a course grade below. Offset by one column
self.write_empty_row_with_styles(
- ["default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
+ ["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
)
self.write_cell(_("Evaluation weight"), "bold")
- weight_percentages = (
+ self.write_cell("")
+ weight_percentages = tuple(
f"{e.weight_percentage}%" if gt1 else None
for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True)
)
self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")
self.write_cell(_("Course Grade"), "bold")
+ self.write_cell("")
for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True):
if not gt1:
self.write_cell()
@@ -271,58 +285,118 @@ def write_overall_results(
self.next_row()
# Same reasoning as above.
- self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1])
+ self.write_empty_row_with_styles(
+ ["default", "default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1]
+ )
+
+ @classmethod
+ def _calculate_display_result(
+ cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]]
+ ) -> tuple[float | None, float | None]:
+ values = []
+ count_sum = 0
+ approval_count = 0
+
+ for grade_result in results[questionnaire_id]:
+ if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
+ continue
+
+ values.append(grade_result.average * grade_result.count_sum)
+ count_sum += grade_result.count_sum
+ if grade_result.question.is_yes_no_question:
+ approval_count += grade_result.approval_count
+
+ if not values:
+ return None, None
+
+ avg = sum(values) / count_sum
+ if question.is_yes_no_question:
+ percent_approval = approval_count / count_sum if count_sum > 0 else 0
+ return avg, percent_approval
+ return avg, None
+
+ @classmethod
+ def _calculate_display_result_average(
+ cls,
+ evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
+ questionnaire_id: int,
+ question: Question,
+ ) -> tuple[float | None, float | None]:
+ avg_values = []
+ count_avg = 0
+ avg_approval = []
+ count_approval = 0
+
+ for __, results in evaluations_with_results:
+ if (
+ results.get(questionnaire_id) is None
+ ): # ignore all results without the questionaire for average calculation
+ continue
+ avg, percent_approval = cls._calculate_display_result(questionnaire_id, question, results)
+ if avg is not None:
+ avg_values.append(avg)
+ count_avg += 1
+ if percent_approval is not None:
+ avg_approval.append(percent_approval)
+ count_approval += 1
+
+ return sum(avg_values) / count_avg if count_avg else None, (
+ sum(avg_approval) / count_approval if count_approval else None
+ )
def write_questionnaire(
self,
questionnaire: Questionnaire,
evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
contributor: UserProfile | None,
+ all_evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
) -> None:
if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR:
self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold")
else:
self.write_cell(questionnaire.public_name, "bold")
+ self.write_cell("", "border_left_right")
+
# first cell of row is printed above
self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results))
for question in self.filter_text_and_heading_questions(questionnaire.questions.all()):
self.write_cell(question.text, "italic" if question.is_heading_question else "default")
+ question_average, question_approval_count = self._calculate_display_result_average(
+ all_evaluations_with_results, questionnaire.id, question
+ )
+
+ if question_average is not None:
+ if question.is_yes_no_question:
+ self.write_cell(f"{question_approval_count:.0%}", self.grade_to_style(question_average))
+ else:
+ self.write_cell(question_average, self.grade_to_style(question_average))
+ else:
+ self.write_cell("", "border_left_right")
+
+ # evaluations
for __, results in evaluations_with_results:
if questionnaire.id not in results or question.is_heading_question:
self.write_cell(style="border_left_right")
continue
- values = []
- count_sum = 0
- approval_count = 0
-
- for grade_result in results[questionnaire.id]:
- if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
- continue
+ avg, percent_approval = self._calculate_display_result(questionnaire.id, question, results)
- values.append(grade_result.average * grade_result.count_sum)
- count_sum += grade_result.count_sum
- if grade_result.question.is_yes_no_question:
- approval_count += grade_result.approval_count
-
- if not values:
+ if avg is None:
self.write_cell(style="border_left_right")
continue
- avg = sum(values) / count_sum
if question.is_yes_no_question:
- percent_approval = approval_count / count_sum if count_sum > 0 else 0
self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg))
else:
self.write_cell(avg, self.grade_to_style(avg))
self.next_row()
- self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))
+ self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1))
- # pylint: disable=arguments-differ
+ # pylint: disable=arguments-differ,too-many-locals
def export_impl(
self,
semesters: QuerySetOrSequence[Semester],
@@ -335,6 +409,8 @@ def export_impl(
# We want to throw early here, since workbook.save() will throw an IndexError otherwise.
assert len(selection_list) > 0
+ all_evaluations_with_results, _, _ = self.filter_evaluations(evaluation_states=[Evaluation.State.PUBLISHED])
+
for sheet_counter, (program_ids, course_type_ids) in enumerate(selection_list, 1):
self.cur_sheet = self.workbook.add_sheet("Sheet " + str(sheet_counter))
self.cur_row = 0
@@ -358,7 +434,9 @@ def export_impl(
)
for questionnaire in used_questionnaires:
- self.write_questionnaire(questionnaire, evaluations_with_results, contributor)
+ self.write_questionnaire(
+ questionnaire, evaluations_with_results, contributor, all_evaluations_with_results
+ )
self.write_overall_results(evaluations_with_results, course_results_exist)
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index a6daafcd25..b84a8eb21c 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -188,12 +188,12 @@ def test_view_excel_file_sorted(self):
# Load responses as Excel files and check for correct sorting
workbook = xlrd.open_workbook(file_contents=content_de.read())
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation1\n")
- self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation2\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation1\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation2\n")
workbook = xlrd.open_workbook(file_contents=content_en.read())
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation2\n")
- self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation2\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation1\n")
def test_course_type_ordering(self):
program = baker.make(Program)
@@ -236,8 +236,8 @@ def test_course_type_ordering(self):
binary_content.seek(0)
workbook = xlrd.open_workbook(file_contents=binary_content.read())
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name + "\n")
- self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_2.full_name + "\n")
course_type_2.order = 0
course_type_2.save()
@@ -249,8 +249,8 @@ def test_course_type_ordering(self):
binary_content.seek(0)
workbook = xlrd.open_workbook(file_contents=binary_content.read())
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n")
- self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n")
+ self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_1.full_name + "\n")
def test_multiple_sheets(self):
binary_content = BytesIO()
@@ -302,17 +302,17 @@ def test_include_unpublished(self):
sheet = self.get_export_sheet(
include_unpublished=False, semester=semester, program=program, course_types=course_types
)
- self.assertEqual(len(sheet.row_values(0)), 2)
- self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name)
+ self.assertEqual(len(sheet.row_values(0)), 3)
+ self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name)
# Now, make sure that it appears when wanted
sheet = self.get_export_sheet(
include_unpublished=True, semester=semester, program=program, course_types=course_types
)
- self.assertEqual(len(sheet.row_values(0)), 3)
+ self.assertEqual(len(sheet.row_values(0)), 4)
# These two should be ordered according to evaluation.course.type.order
- self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name)
- self.assertEqual(sheet.row_values(0)[2][:-1], unpublished_evaluation.full_name)
+ self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name)
+ self.assertEqual(sheet.row_values(0)[3][:-1], unpublished_evaluation.full_name)
def test_include_not_enough_voters(self):
semester = baker.make(Semester)
@@ -341,15 +341,15 @@ def test_include_not_enough_voters(self):
# First, make sure that the one with only a single voter does not appear
sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=False)
- self.assertEqual(len(sheet.row_values(0)), 2)
- self.assertEqual(sheet.row_values(0)[1][:-1], enough_voters_evaluation.full_name)
+ self.assertEqual(len(sheet.row_values(0)), 3)
+ self.assertEqual(sheet.row_values(0)[2][:-1], enough_voters_evaluation.full_name)
# Now, check with the option enabled
sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=True)
- self.assertEqual(len(sheet.row_values(0)), 3)
+ self.assertEqual(len(sheet.row_values(0)), 4)
self.assertEqual(
{enough_voters_evaluation.full_name, not_enough_voters_evaluation.full_name},
- {sheet.row_values(0)[1][:-1], sheet.row_values(0)[2][:-1]},
+ {sheet.row_values(0)[2][:-1], sheet.row_values(0)[3][:-1]},
)
def test_no_program_or_course_type(self):
@@ -357,6 +357,17 @@ def test_no_program_or_course_type(self):
with self.assertRaises(AssertionError):
ResultsExporter().export(BytesIO(), [evaluation.course.semester], [])
+ def test_exclude_single_result(self):
+ program = baker.make(Program)
+ evaluation = baker.make(
+ Evaluation, is_single_result=True, state=Evaluation.State.PUBLISHED, course__programs=[program]
+ )
+ cache_results(evaluation)
+ sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
+ self.assertEqual(
+ len(sheet.row_values(0)), 2, "There should be no column for the evaluation, only the row description"
+ )
+
def test_exclude_used_but_unanswered_questionnaires(self):
program = baker.make(Program)
evaluation = baker.make(
@@ -392,7 +403,7 @@ def test_program_course_type_name(self):
cache_results(evaluation)
sheet = self.get_export_sheet(evaluation.course.semester, program, [course_type.id])
- self.assertEqual(sheet.col_values(1)[1:3], [program.name, course_type.name])
+ self.assertEqual(sheet.col_values(2)[1:3], [program.name, course_type.name])
def test_multiple_evaluations(self):
semester = baker.make(Semester)
@@ -408,7 +419,7 @@ def test_multiple_evaluations(self):
sheet = self.get_export_sheet(semester, program, [evaluation1.course.type.id, evaluation2.course.type.id])
- self.assertEqual(set(sheet.row_values(0)[1:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"})
+ self.assertEqual(set(sheet.row_values(0)[2:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"})
def test_correct_grades_and_bottom_numbers(self):
program = baker.make(Program)
@@ -436,11 +447,11 @@ def test_correct_grades_and_bottom_numbers(self):
sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
- self.assertEqual(sheet.row_values(5)[1], 2.0) # question 1 average
- self.assertEqual(sheet.row_values(8)[1], 3.0) # question 2 average
- self.assertEqual(sheet.row_values(10)[1], 2.5) # Average grade
- self.assertEqual(sheet.row_values(11)[1], "5/10") # Voters / Participants
- self.assertEqual(sheet.row_values(12)[1], "50%") # Voter percentage
+ self.assertEqual(sheet.row_values(5)[2], 2.0) # question 1 average
+ self.assertEqual(sheet.row_values(8)[2], 3.0) # question 2 average
+ self.assertEqual(sheet.row_values(10)[2], 2.5) # Average grade
+ self.assertEqual(sheet.row_values(11)[2], "5/10") # Voters / Participants
+ self.assertEqual(sheet.row_values(12)[2], "50%") # Voter percentage
def test_course_grade(self):
program = baker.make(Program)
@@ -470,9 +481,9 @@ def test_course_grade(self):
cache_results(evaluation)
sheet = self.get_export_sheet(course.semester, program, [course.type.id])
- self.assertEqual(sheet.row_values(12)[1], expected_average)
self.assertEqual(sheet.row_values(12)[2], expected_average)
self.assertEqual(sheet.row_values(12)[3], expected_average)
+ self.assertEqual(sheet.row_values(12)[4], expected_average)
def test_yes_no_question_result(self):
program = baker.make(Program)
@@ -494,8 +505,8 @@ def test_yes_no_question_result(self):
cache_results(evaluation)
sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
- self.assertEqual(sheet.row_values(5)[0], assignment.question.text)
- self.assertEqual(sheet.row_values(5)[1], "67%")
+ self.assertEqual(sheet.row_values(5)[0], question.text)
+ self.assertEqual(sheet.row_values(5)[2], "67%")
def test_contributor_result_export(self):
program = baker.make(Program)
@@ -544,24 +555,24 @@ def test_contributor_result_export(self):
workbook = xlrd.open_workbook(file_contents=binary_content)
self.assertEqual(
- workbook.sheets()[0].row_values(0)[1],
+ workbook.sheets()[0].row_values(0)[2],
f"{evaluation_1.full_name}\n{evaluation_1.course.semester.name}\n{contributor.full_name}",
)
self.assertEqual(
- workbook.sheets()[0].row_values(0)[2],
+ workbook.sheets()[0].row_values(0)[3],
f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}",
)
self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.public_name)
- self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_assignment.question.text)
- self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0)
+ self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text)
+ self.assertEqual(workbook.sheets()[0].row_values(5)[3], 4.0)
self.assertEqual(
workbook.sheets()[0].row_values(7)[0],
f"{contributor_questionnaire.public_name} ({contributor.full_name})",
)
- self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_assignment.question.text)
- self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0)
+ self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text)
+ self.assertEqual(workbook.sheets()[0].row_values(8)[3], 3.0)
self.assertEqual(workbook.sheets()[0].row_values(10)[0], "Overall Average Grade")
- self.assertEqual(workbook.sheets()[0].row_values(10)[2], 3.25)
+ self.assertEqual(workbook.sheets()[0].row_values(10)[3], 3.25)
def test_text_answer_export(self):
evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, can_publish_text_results=True)
diff --git a/evap/staff/views.py b/evap/staff/views.py
index 4666bca574..ce7c68953a 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -6,7 +6,7 @@
from dataclasses import dataclass
from datetime import date, datetime
from enum import Enum
-from typing import Any, Final, Literal, assert_never, cast
+from typing import Any, Literal, assert_never, cast
import openpyxl
from django.conf import settings
@@ -1539,7 +1539,9 @@ def evaluation_person_management(request, evaluation_id: int):
raise SuspiciousOperation("Invalid POST operation")
import_action = ImportAction.from_operation(operation)
- import_type: Final = ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
+ import_type: Literal[ImportType.PARTICIPANT, ImportType.CONTRIBUTOR] = (
+ ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
+ )
excel_form = participant_excel_form if "participants" in operation else contributor_excel_form
copy_form = participant_copy_form if "participants" in operation else contributor_copy_form
From 573e48e5092fe0075d131092cf92145cdebfbb08 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 13 Jan 2025 21:03:24 +0100
Subject: [PATCH 03/20] add tests for results exporter average
---
evap/results/tests/test_exporters.py | 163 ++++++++++++++++++++++++++-
1 file changed, 160 insertions(+), 3 deletions(-)
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index b84a8eb21c..e85334729e 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -614,6 +614,163 @@ def test_text_answer_export(self):
self.assertEqual(sheet.row_values(2)[0], evaluation.course.responsibles_names)
# Questions are ordered by questionnaire type, answers keep their order respectively
- self.assertEqual(sheet.row_values(3)[0], assignments[0].question.text)
- self.assertEqual(sheet.row_values(5)[0], assignments[1].question.text)
- self.assertEqual(sheet.row_values(6)[0], assignments[2].question.text)
+ self.assertEqual(sheet.row_values(3)[0], questions[0].text)
+ self.assertEqual(sheet.row_values(5)[0], questions[1].text)
+ self.assertEqual(sheet.row_values(6)[0], questions[2].text)
+
+ def test_total_average(self):
+ program = baker.make(Program)
+
+ questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
+
+ question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
+
+ evaluation_1 = baker.make(
+ Evaluation,
+ course__programs=[program],
+ state=Evaluation.State.PUBLISHED,
+ _participant_count=2,
+ _voter_count=2,
+ )
+
+ evaluation_2 = baker.make(
+ Evaluation,
+ course__programs=[program],
+ state=Evaluation.State.PUBLISHED,
+ _participant_count=2,
+ _voter_count=2,
+ )
+
+ evaluation_1.general_contribution.questionnaires.set([questionnaire])
+
+ make_rating_answer_counters(question, evaluation_1.general_contribution)
+
+ evaluation_2.general_contribution.questionnaires.set([questionnaire])
+
+ make_rating_answer_counters(question, evaluation_2.general_contribution)
+
+ cache_results(evaluation_1)
+ cache_results(evaluation_2)
+
+ binary_content = BytesIO()
+ ResultsExporter().export(
+ binary_content,
+ [evaluation_1.course.semester, evaluation_2.course.semester],
+ [
+ (
+ [course_program.id for course_program in evaluation_1.course.programs.all()]
+ + [course_program.id for course_program in evaluation_2.course.programs.all()],
+ [evaluation_1.course.type.id, evaluation_2.course.type.id],
+ )
+ ],
+ True,
+ True,
+ )
+ binary_content.seek(0)
+ workbook = xlrd.open_workbook(file_contents=binary_content.read())
+
+ self.assertEqual(
+ float(workbook.sheets()[0].row_values(5)[1]),
+ (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2,
+ )
+
+ self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
+
+ def test_not_all_contributions_are_created_equal(self):
+ program = baker.make(Program)
+
+ questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
+ questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP)
+
+ question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_1)
+ question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2)
+
+ evaluation_1 = baker.make(
+ Evaluation,
+ course__programs=[program],
+ state=Evaluation.State.PUBLISHED,
+ _participant_count=2,
+ _voter_count=2,
+ )
+
+ evaluation_2 = baker.make(
+ Evaluation,
+ course__programs=[program],
+ state=Evaluation.State.PUBLISHED,
+ _participant_count=2,
+ _voter_count=2,
+ )
+
+ evaluation_1.general_contribution.questionnaires.set([questionnaire_1])
+
+ make_rating_answer_counters(question_1, evaluation_1.general_contribution)
+
+ evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2])
+
+ make_rating_answer_counters(question_1, evaluation_2.general_contribution)
+ make_rating_answer_counters(question_2, evaluation_2.general_contribution)
+
+ cache_results(evaluation_1)
+ cache_results(evaluation_2)
+
+ binary_content = BytesIO()
+ ResultsExporter().export(
+ binary_content,
+ [evaluation_1.course.semester, evaluation_2.course.semester],
+ [
+ (
+ [course_program.id for course_program in evaluation_1.course.programs.all()]
+ + [course_program.id for course_program in evaluation_2.course.programs.all()],
+ [evaluation_1.course.type.id, evaluation_2.course.type.id],
+ )
+ ],
+ True,
+ True,
+ )
+ binary_content.seek(0)
+ workbook = xlrd.open_workbook(file_contents=binary_content.read())
+
+ self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3]))
+ self.assertEqual("", workbook.sheets()[0].row_values(8)[2])
+
+ self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
+
+ def test_only_export_averages(self):
+ program = baker.make(Program)
+
+ questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
+
+ question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
+
+ evaluation = baker.make(
+ Evaluation,
+ course__programs=[program],
+ state=Evaluation.State.PUBLISHED,
+ _participant_count=2,
+ _voter_count=2,
+ )
+
+ evaluation.general_contribution.questionnaires.set([questionnaire])
+
+ make_rating_answer_counters(question, evaluation.general_contribution)
+
+ cache_results(evaluation)
+
+ binary_content = BytesIO()
+ ResultsExporter().export(
+ binary_content,
+ [],
+ [
+ (
+ [],
+ [],
+ )
+ ],
+ True,
+ True,
+ )
+ binary_content.seek(0)
+ workbook = xlrd.open_workbook(file_contents=binary_content.read())
+
+ self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
+ self.assertEqual(workbook.sheets()[0].row_values(5)[1], 1.0)
From 6de7b5d3ffc6a1db1614385f4a13c9ec3c16b882 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 27 Jan 2025 19:25:44 +0100
Subject: [PATCH 04/20] address comments
---
evap/results/exporters.py | 52 +++++++++++++++++++--------------------
1 file changed, 25 insertions(+), 27 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 0d29caebd0..aafac1451a 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -203,7 +203,7 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
- self.write_cell("Average for this Question", "evaluation")
+ self.write_cell(_("Average for this Question"), "evaluation")
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
@@ -226,7 +226,7 @@ def write_headings_and_evaluation_info(
self.write_cell(evaluation.course.type.name, "border_left_right")
self.next_row()
- # One more cell is needed for the question column
+ # One column for the question, one column for the average, n columns for the evaluations
self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1))
def write_overall_results(
@@ -259,7 +259,7 @@ def write_overall_results(
# Only query the number of evaluations once and keep track of it here.
count_gt_1: list[bool] = [e.course_evaluations_count > 1 for e in annotated_evaluations]
- # Borders only if there is a course grade below. Offset by one column
+ # Borders only if there is a course grade below. Offset by one column for column title and one for average
self.write_empty_row_with_styles(
["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
)
@@ -293,7 +293,7 @@ def write_overall_results(
def _calculate_display_result(
cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]]
) -> tuple[float | None, float | None]:
- values = []
+ value_sum = 0.0
count_sum = 0
approval_count = 0
@@ -301,18 +301,18 @@ def _calculate_display_result(
if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
continue
- values.append(grade_result.average * grade_result.count_sum)
+ value_sum += grade_result.average * grade_result.count_sum
count_sum += grade_result.count_sum
if grade_result.question.is_yes_no_question:
approval_count += grade_result.approval_count
- if not values:
+ if not value_sum:
return None, None
- avg = sum(values) / count_sum
+ avg = value_sum / count_sum
if question.is_yes_no_question:
- percent_approval = approval_count / count_sum if count_sum > 0 else 0
- return avg, percent_approval
+ average_approval_ratio = approval_count / count_sum if count_sum > 0 else 0
+ return avg, average_approval_ratio
return avg, None
@classmethod
@@ -322,26 +322,26 @@ def _calculate_display_result_average(
questionnaire_id: int,
question: Question,
) -> tuple[float | None, float | None]:
- avg_values = []
+ avg_value_sum = 0.0
count_avg = 0
- avg_approval = []
+ avg_approval_sum = 0.0
count_approval = 0
for __, results in evaluations_with_results:
if (
results.get(questionnaire_id) is None
- ): # ignore all results without the questionaire for average calculation
+ ): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire
continue
- avg, percent_approval = cls._calculate_display_result(questionnaire_id, question, results)
+ avg, average_approval_ratio = cls._calculate_display_result(questionnaire_id, question, results)
if avg is not None:
- avg_values.append(avg)
+ avg_value_sum += avg
count_avg += 1
- if percent_approval is not None:
- avg_approval.append(percent_approval)
+ if average_approval_ratio is not None:
+ avg_approval_sum += average_approval_ratio
count_approval += 1
- return sum(avg_values) / count_avg if count_avg else None, (
- sum(avg_approval) / count_approval if count_approval else None
+ return avg_value_sum / count_avg if count_avg else None, (
+ avg_approval_sum / count_approval if count_approval else None
)
def write_questionnaire(
@@ -364,15 +364,13 @@ def write_questionnaire(
for question in self.filter_text_and_heading_questions(questionnaire.questions.all()):
self.write_cell(question.text, "italic" if question.is_heading_question else "default")
- question_average, question_approval_count = self._calculate_display_result_average(
+ average_grade, approval_ratio = self._calculate_display_result_average(
all_evaluations_with_results, questionnaire.id, question
)
-
- if question_average is not None:
- if question.is_yes_no_question:
- self.write_cell(f"{question_approval_count:.0%}", self.grade_to_style(question_average))
- else:
- self.write_cell(question_average, self.grade_to_style(question_average))
+ if approval_ratio is not None and average_grade is not None:
+ self.write_cell(f"{approval_ratio:.0%}", self.grade_to_style(average_grade))
+ elif average_grade is not None:
+ self.write_cell(average_grade, self.grade_to_style(average_grade))
else:
self.write_cell("", "border_left_right")
@@ -382,14 +380,14 @@ def write_questionnaire(
self.write_cell(style="border_left_right")
continue
- avg, percent_approval = self._calculate_display_result(questionnaire.id, question, results)
+ avg, average_approval_ratio = self._calculate_display_result(questionnaire.id, question, results)
if avg is None:
self.write_cell(style="border_left_right")
continue
if question.is_yes_no_question:
- self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg))
+ self.write_cell(f"{average_approval_ratio:.0%}", self.grade_to_style(avg))
else:
self.write_cell(avg, self.grade_to_style(avg))
self.next_row()
From 634f1399314743ca3fca3ec11c241f8c872db697 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 17 Feb 2025 17:54:46 +0100
Subject: [PATCH 05/20] revert tuple change
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index aafac1451a..39f64aefd5 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -266,7 +266,7 @@ def write_overall_results(
self.write_cell(_("Evaluation weight"), "bold")
self.write_cell("")
- weight_percentages = tuple(
+ weight_percentages = (
f"{e.weight_percentage}%" if gt1 else None
for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True)
)
From 2ec6729e1b465b9b28210dc89dee27297ce9443a Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 17 Feb 2025 18:27:37 +0100
Subject: [PATCH 06/20] rename functions
---
evap/results/exporters.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 39f64aefd5..8f5b974263 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -290,7 +290,7 @@ def write_overall_results(
)
@classmethod
- def _calculate_display_result(
+ def _get_average_grade_and_approval_ratio(
cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]]
) -> tuple[float | None, float | None]:
value_sum = 0.0
@@ -316,7 +316,7 @@ def _calculate_display_result(
return avg, None
@classmethod
- def _calculate_display_result_average(
+ def _get_average_of_average_grade_and_approval_ratio(
cls,
evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
questionnaire_id: int,
@@ -332,7 +332,7 @@ def _calculate_display_result_average(
results.get(questionnaire_id) is None
): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire
continue
- avg, average_approval_ratio = cls._calculate_display_result(questionnaire_id, question, results)
+ avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results)
if avg is not None:
avg_value_sum += avg
count_avg += 1
@@ -364,7 +364,7 @@ def write_questionnaire(
for question in self.filter_text_and_heading_questions(questionnaire.questions.all()):
self.write_cell(question.text, "italic" if question.is_heading_question else "default")
- average_grade, approval_ratio = self._calculate_display_result_average(
+ average_grade, approval_ratio = self._get_average_of_average_grade_and_approval_ratio(
all_evaluations_with_results, questionnaire.id, question
)
if approval_ratio is not None and average_grade is not None:
@@ -380,7 +380,9 @@ def write_questionnaire(
self.write_cell(style="border_left_right")
continue
- avg, average_approval_ratio = self._calculate_display_result(questionnaire.id, question, results)
+ avg, average_approval_ratio = self._get_average_grade_and_approval_ratio(
+ questionnaire.id, question, results
+ )
if avg is None:
self.write_cell(style="border_left_right")
From d5b83113641154902c18ed9812b836878e4e0675 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 17 Feb 2025 18:36:33 +0100
Subject: [PATCH 07/20] add style
---
evap/evaluation/tools.py | 1 +
evap/results/exporters.py | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py
index 8ff1cb777b..e3f0c8fe7e 100644
--- a/evap/evaluation/tools.py
+++ b/evap/evaluation/tools.py
@@ -295,6 +295,7 @@ def content(self, value):
class ExcelExporter(ABC):
styles = {
"default": xlwt.Style.default_style,
+ "missing_average": xlwt.Style.default_style,
"headline": xlwt.easyxf(
"font: bold on, height 400; alignment: horiz centre, vert centre, wrap on; borders: bottom medium",
num_format_str="0.0",
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 8f5b974263..9f5f9f5236 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -265,7 +265,7 @@ def write_overall_results(
)
self.write_cell(_("Evaluation weight"), "bold")
- self.write_cell("")
+ self.write_cell("", "missing_average")
weight_percentages = (
f"{e.weight_percentage}%" if gt1 else None
for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True)
@@ -273,7 +273,7 @@ def write_overall_results(
self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")
self.write_cell(_("Course Grade"), "bold")
- self.write_cell("")
+ self.write_cell("", "missing_average")
for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True):
if not gt1:
self.write_cell()
From 79df495507b79a4c50bfd7d5afaa221044fac0b8 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 17 Feb 2025 20:06:21 +0100
Subject: [PATCH 08/20] remove useless test
---
evap/results/exporters.py | 8 +++---
evap/results/tests/test_exporters.py | 40 ----------------------------
2 files changed, 4 insertions(+), 44 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 9f5f9f5236..c88a5a167d 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -128,13 +128,13 @@ def filter_evaluations( # noqa: PLR0912
used_questionnaires: set[Questionnaire] = set()
evaluations_filter = Q()
- if semesters:
+ if semesters is not None:
evaluations_filter &= Q(course__semester__in=semesters)
- if evaluation_states:
+ if evaluation_states is not None:
evaluations_filter &= Q(state__in=evaluation_states)
- if program_ids:
+ if program_ids is not None:
evaluations_filter &= Q(course__programs__in=program_ids)
- if course_type_ids:
+ if course_type_ids is not None:
evaluations_filter &= Q(course__type__in=course_type_ids)
if contributor:
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index e85334729e..a9c51316f8 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -734,43 +734,3 @@ def test_not_all_contributions_are_created_equal(self):
self.assertEqual("", workbook.sheets()[0].row_values(8)[2])
self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
-
- def test_only_export_averages(self):
- program = baker.make(Program)
-
- questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
-
- question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
-
- evaluation = baker.make(
- Evaluation,
- course__programs=[program],
- state=Evaluation.State.PUBLISHED,
- _participant_count=2,
- _voter_count=2,
- )
-
- evaluation.general_contribution.questionnaires.set([questionnaire])
-
- make_rating_answer_counters(question, evaluation.general_contribution)
-
- cache_results(evaluation)
-
- binary_content = BytesIO()
- ResultsExporter().export(
- binary_content,
- [],
- [
- (
- [],
- [],
- )
- ],
- True,
- True,
- )
- binary_content.seek(0)
- workbook = xlrd.open_workbook(file_contents=binary_content.read())
-
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
- self.assertEqual(workbook.sheets()[0].row_values(5)[1], 1.0)
From adf57b2b61f4996f7b6a42e8d31734594421c7f6 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 10 Mar 2025 18:36:45 +0100
Subject: [PATCH 09/20] rephrase
---
evap/results/exporters.py | 2 +-
evap/results/tests/test_exporters.py | 10 ++++++++--
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index c88a5a167d..04f74a1070 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -203,7 +203,7 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
- self.write_cell(_("Average for this Question"), "evaluation")
+ self.write_cell(_("Average for this question over all evaluations in all published semesters"), "evaluation")
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index a9c51316f8..19687710bf 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -674,7 +674,10 @@ def test_total_average(self):
(float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2,
)
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
+ self.assertEqual(
+ workbook.sheets()[0].row_values(0)[1],
+ "Average for this question over all evaluations in all published semesters",
+ )
def test_not_all_contributions_are_created_equal(self):
program = baker.make(Program)
@@ -733,4 +736,7 @@ def test_not_all_contributions_are_created_equal(self):
self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3]))
self.assertEqual("", workbook.sheets()[0].row_values(8)[2])
- self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question")
+ self.assertEqual(
+ workbook.sheets()[0].row_values(0)[1],
+ "Average for this question over all evaluations in all published semesters",
+ )
From 10d9bc79997509dbe1e819819c808c38f72e591c Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 10 Mar 2025 20:25:58 +0100
Subject: [PATCH 10/20] fix test
---
evap/results/exporters.py | 5 ++++-
evap/results/tests/test_exporters.py | 14 +++++++-------
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 04f74a1070..73f1f0aaed 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -41,6 +41,9 @@ class ResultsExporter(ExcelExporter):
"evaluation": xlwt.easyxf(
"alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium"
),
+ "average": xlwt.easyxf(
+ "alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium; font: italic on"
+ ),
"total_voters": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"),
"evaluation_rate": xlwt.easyxf("alignment: horiz centre; borders: left medium, bottom medium, right medium"),
"evaluation_weight": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"),
@@ -203,7 +206,7 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
- self.write_cell(_("Average for this question over all evaluations in all published semesters"), "evaluation")
+ self.write_cell(_("Average for this question over all evaluations in all published semesters"), "average")
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index 19687710bf..52940fb86b 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -623,7 +623,7 @@ def test_total_average(self):
questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
- question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire)
+ question = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire)
evaluation_1 = baker.make(
Evaluation,
@@ -637,17 +637,17 @@ def test_total_average(self):
Evaluation,
course__programs=[program],
state=Evaluation.State.PUBLISHED,
- _participant_count=2,
- _voter_count=2,
+ _participant_count=3,
+ _voter_count=3,
)
evaluation_1.general_contribution.questionnaires.set([questionnaire])
- make_rating_answer_counters(question, evaluation_1.general_contribution)
+ make_rating_answer_counters(question, evaluation_1.general_contribution, [1, 1, 0, 0, 0])
evaluation_2.general_contribution.questionnaires.set([questionnaire])
- make_rating_answer_counters(question, evaluation_2.general_contribution)
+ make_rating_answer_counters(question, evaluation_2.general_contribution, [1, 2, 0, 0, 0])
cache_results(evaluation_1)
cache_results(evaluation_2)
@@ -669,7 +669,7 @@ def test_total_average(self):
binary_content.seek(0)
workbook = xlrd.open_workbook(file_contents=binary_content.read())
- self.assertEqual(
+ self.assertAlmostEqual(
float(workbook.sheets()[0].row_values(5)[1]),
(float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2,
)
@@ -734,7 +734,7 @@ def test_not_all_contributions_are_created_equal(self):
workbook = xlrd.open_workbook(file_contents=binary_content.read())
self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3]))
- self.assertEqual("", workbook.sheets()[0].row_values(8)[2])
+ self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) # testing empty heading for average
self.assertEqual(
workbook.sheets()[0].row_values(0)[1],
From cdac41d8864b3877ad5a532d7f13e5645609e02d Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 24 Mar 2025 18:21:44 +0100
Subject: [PATCH 11/20] revert type
---
evap/staff/views.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/evap/staff/views.py b/evap/staff/views.py
index ce7c68953a..4666bca574 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -6,7 +6,7 @@
from dataclasses import dataclass
from datetime import date, datetime
from enum import Enum
-from typing import Any, Literal, assert_never, cast
+from typing import Any, Final, Literal, assert_never, cast
import openpyxl
from django.conf import settings
@@ -1539,9 +1539,7 @@ def evaluation_person_management(request, evaluation_id: int):
raise SuspiciousOperation("Invalid POST operation")
import_action = ImportAction.from_operation(operation)
- import_type: Literal[ImportType.PARTICIPANT, ImportType.CONTRIBUTOR] = (
- ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
- )
+ import_type: Final = ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
excel_form = participant_excel_form if "participants" in operation else contributor_excel_form
copy_form = participant_copy_form if "participants" in operation else contributor_copy_form
From 1fa7ef80f01971a6d3122b28829a9887e1eeaf73 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 24 Mar 2025 19:01:53 +0100
Subject: [PATCH 12/20] use hard-coded value in test
---
evap/results/tests/test_exporters.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index 52940fb86b..1d0a2f70b6 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -669,10 +669,7 @@ def test_total_average(self):
binary_content.seek(0)
workbook = xlrd.open_workbook(file_contents=binary_content.read())
- self.assertAlmostEqual(
- float(workbook.sheets()[0].row_values(5)[1]),
- (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2,
- )
+ self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335)
self.assertEqual(
workbook.sheets()[0].row_values(0)[1],
From 09b764d6dd31ea622cc71be70a5b174a61cd7a74 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 24 Mar 2025 20:07:10 +0100
Subject: [PATCH 13/20] merge tests
---
evap/results/exporters.py | 8 +--
evap/results/tests/test_exporters.py | 76 ++++------------------------
2 files changed, 15 insertions(+), 69 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 73f1f0aaed..1032c008d9 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -333,7 +333,7 @@ def _get_average_of_average_grade_and_approval_ratio(
for __, results in evaluations_with_results:
if (
results.get(questionnaire_id) is None
- ): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire
+ ): # we iterate over all distinct questionnaires from all evaluations but some evaluations do not include a specific questionnaire
continue
avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results)
if avg is not None:
@@ -343,9 +343,9 @@ def _get_average_of_average_grade_and_approval_ratio(
avg_approval_sum += average_approval_ratio
count_approval += 1
- return avg_value_sum / count_avg if count_avg else None, (
- avg_approval_sum / count_approval if count_approval else None
- )
+ avg_value = avg_value_sum / count_avg if count_avg else None
+ avg_approval = avg_approval_sum / count_approval if count_approval else None
+ return avg_value, avg_approval
def write_questionnaire(
self,
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index 1d0a2f70b6..7dc9ba4236 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -621,68 +621,10 @@ def test_text_answer_export(self):
def test_total_average(self):
program = baker.make(Program)
- questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
-
- question = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire)
-
- evaluation_1 = baker.make(
- Evaluation,
- course__programs=[program],
- state=Evaluation.State.PUBLISHED,
- _participant_count=2,
- _voter_count=2,
- )
-
- evaluation_2 = baker.make(
- Evaluation,
- course__programs=[program],
- state=Evaluation.State.PUBLISHED,
- _participant_count=3,
- _voter_count=3,
- )
-
- evaluation_1.general_contribution.questionnaires.set([questionnaire])
-
- make_rating_answer_counters(question, evaluation_1.general_contribution, [1, 1, 0, 0, 0])
-
- evaluation_2.general_contribution.questionnaires.set([questionnaire])
-
- make_rating_answer_counters(question, evaluation_2.general_contribution, [1, 2, 0, 0, 0])
-
- cache_results(evaluation_1)
- cache_results(evaluation_2)
-
- binary_content = BytesIO()
- ResultsExporter().export(
- binary_content,
- [evaluation_1.course.semester, evaluation_2.course.semester],
- [
- (
- [course_program.id for course_program in evaluation_1.course.programs.all()]
- + [course_program.id for course_program in evaluation_2.course.programs.all()],
- [evaluation_1.course.type.id, evaluation_2.course.type.id],
- )
- ],
- True,
- True,
- )
- binary_content.seek(0)
- workbook = xlrd.open_workbook(file_contents=binary_content.read())
-
- self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335)
-
- self.assertEqual(
- workbook.sheets()[0].row_values(0)[1],
- "Average for this question over all evaluations in all published semesters",
- )
-
- def test_not_all_contributions_are_created_equal(self):
- program = baker.make(Program)
-
questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP)
- question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_1)
+ question_1 = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire_1)
question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2)
evaluation_1 = baker.make(
@@ -697,17 +639,17 @@ def test_not_all_contributions_are_created_equal(self):
Evaluation,
course__programs=[program],
state=Evaluation.State.PUBLISHED,
- _participant_count=2,
- _voter_count=2,
+ _participant_count=3,
+ _voter_count=3,
)
evaluation_1.general_contribution.questionnaires.set([questionnaire_1])
- make_rating_answer_counters(question_1, evaluation_1.general_contribution)
+ make_rating_answer_counters(question_1, evaluation_1.general_contribution, [1, 1, 0, 0, 0])
evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2])
- make_rating_answer_counters(question_1, evaluation_2.general_contribution)
+ make_rating_answer_counters(question_1, evaluation_2.general_contribution, [1, 2, 0, 0, 0])
make_rating_answer_counters(question_2, evaluation_2.general_contribution)
cache_results(evaluation_1)
@@ -730,10 +672,14 @@ def test_not_all_contributions_are_created_equal(self):
binary_content.seek(0)
workbook = xlrd.open_workbook(file_contents=binary_content.read())
- self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3]))
- self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) # testing empty heading for average
+ self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335)
self.assertEqual(
workbook.sheets()[0].row_values(0)[1],
"Average for this question over all evaluations in all published semesters",
)
+
+        # average for the second questionnaire must equal the value from evaluation_2 (evaluation_1 does not use that questionnaire)
+ self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3]))
+ # average field next to the questionnaire title must be empty
+ self.assertEqual("", workbook.sheets()[0].row_values(8)[2])
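As a sanity check on the value asserted above: the expected 1.5833… can be reproduced by hand if the total column is read as the plain (unweighted) mean of the two per-evaluation grade averages. The sketch below only restates that arithmetic; average_from_counters is a hypothetical helper, not part of the test suite.

# Counters [1, 1, 0, 0, 0] mean one vote for grade 1 and one vote for grade 2, and so on.
def average_from_counters(counters):
    votes = sum(counters)
    weighted = sum(grade * count for grade, count in enumerate(counters, start=1))
    return weighted / votes

avg_1 = average_from_counters([1, 1, 0, 0, 0])  # evaluation_1: 1.5
avg_2 = average_from_counters([1, 2, 0, 0, 0])  # evaluation_2: 5/3 ≈ 1.6667
total = (avg_1 + avg_2) / 2                     # 19/12 ≈ 1.5833, matching the assertAlmostEqual above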
From ce5ee00d08b7152221b1328017b5fe3b6cd52d23 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 19 May 2025 18:31:37 +0200
Subject: [PATCH 14/20] Revert removal of the private review decision
---
evap/evaluation/models.py | 9 +++++++++
evap/evaluation/tests/test_models.py | 1 +
evap/results/tools.py | 4 ++--
.../templates/staff_evaluation_textanswers_section.html | 3 +++
evap/staff/views.py | 1 +
5 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py
index 96629227fa..e558def6f3 100644
--- a/evap/evaluation/models.py
+++ b/evap/evaluation/models.py
@@ -1628,6 +1628,7 @@ class ReviewDecision(models.TextChoices):
"""
PUBLIC = "PU", _("public")
+ PRIVATE = "PR", _("private") # This answer should only be displayed to the contributor the question was about
DELETED = "DE", _("deleted")
UNDECIDED = "UN", _("undecided")
@@ -1657,6 +1658,10 @@ class Meta:
def will_be_deleted(self):
return self.review_decision == self.ReviewDecision.DELETED
+ @property
+ def will_be_private(self):
+ return self.review_decision == self.ReviewDecision.PRIVATE
+
@property
def will_be_public(self):
return self.review_decision == self.ReviewDecision.PUBLIC
@@ -1667,6 +1672,10 @@ def will_be_public(self):
def is_public(self):
return self.will_be_public
+ @property
+ def is_private(self):
+ return self.will_be_private
+
@property
def is_reviewed(self):
return self.review_decision != self.ReviewDecision.UNDECIDED
diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py
index 8dca0d7306..7f3751a4cc 100644
--- a/evap/evaluation/tests/test_models.py
+++ b/evap/evaluation/tests/test_models.py
@@ -386,6 +386,7 @@ def test_textanswers_to_delete_get_deleted_on_publish(self):
[
TextAnswer.ReviewDecision.DELETED,
TextAnswer.ReviewDecision.PUBLIC,
+ TextAnswer.ReviewDecision.PRIVATE,
]
),
_quantity=3,
diff --git a/evap/results/tools.py b/evap/results/tools.py
index 29a5726f42..6fdf8ae064 100644
--- a/evap/results/tools.py
+++ b/evap/results/tools.py
@@ -231,7 +231,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool =
((textanswer.contribution_id, textanswer.assignment_id), textanswer)
for contribution in evaluation.contributions.all()
for textanswer in contribution.textanswer_set.all()
- if textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC
+ if textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC]
)
racs_per_contribution_assignment: dict[tuple[int, int], list[RatingAnswerCounter]] = unordered_groupby(
@@ -483,7 +483,7 @@ def can_textanswer_be_seen_by( # noqa: PLR0911,PLR0912
view_general_results: ViewGeneralResults,
view_contributor_results: ViewContributorResults,
) -> bool:
- assert textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC
+ assert textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC]
contributor = textanswer.contribution.contributor
# NOTE: when changing this behavior, make sure all changes are also reflected in results.tools.textanswers_visible_to
diff --git a/evap/staff/templates/staff_evaluation_textanswers_section.html b/evap/staff/templates/staff_evaluation_textanswers_section.html
index d6ef8c0942..1a296e5b41 100644
--- a/evap/staff/templates/staff_evaluation_textanswers_section.html
+++ b/evap/staff/templates/staff_evaluation_textanswers_section.html
@@ -39,6 +39,9 @@
+
+
+
diff --git a/evap/staff/views.py b/evap/staff/views.py
index 4666bca574..137e8baf3b 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -1740,6 +1740,7 @@ def evaluation_textanswers_update_publish(request):
review_decision_for_action = {
"publish": TextAnswer.ReviewDecision.PUBLIC,
+ "make_private": TextAnswer.ReviewDecision.PRIVATE,
"delete": TextAnswer.ReviewDecision.DELETED,
"unreview": TextAnswer.ReviewDecision.UNDECIDED,
}
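For context, the restored choice and the staff action mapping fit together as sketched below. This is a self-contained toy (a plain Enum standing in for Django's TextChoices, plus the property names restored in the model above), not the actual EvaP code.

from enum import Enum

class ReviewDecision(str, Enum):
    PUBLIC = "PU"
    PRIVATE = "PR"   # only shown to the contributor the question was about
    DELETED = "DE"
    UNDECIDED = "UN"

# mirrors review_decision_for_action in evaluation_textanswers_update_publish
review_decision_for_action = {
    "publish": ReviewDecision.PUBLIC,
    "make_private": ReviewDecision.PRIVATE,
    "delete": ReviewDecision.DELETED,
    "unreview": ReviewDecision.UNDECIDED,
}

decision = review_decision_for_action["make_private"]
assert decision == ReviewDecision.PRIVATE      # will_be_private / is_private are True again
assert decision != ReviewDecision.UNDECIDED    # the answer therefore counts as reviewed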
From b215b45c824c824bdbf05b642e865e6fe21bf3e9 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 19:22:42 +0200
Subject: [PATCH 15/20] Update evap/results/exporters.py
Co-authored-by: Niklas Mohrin
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 1032c008d9..e0a0fec10c 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -218,7 +218,7 @@ def write_headings_and_evaluation_info(
self.next_row()
self.write_cell(_("Programs"), "bold")
- self.write_cell("", "program")
+ self.write_cell("", "program") # empty cell in grade-average column
for evaluation, __ in evaluations_with_results:
self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program")
From 8f702e6902850cdf6e4f3d2a05fc7dbd53759783 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 19:25:34 +0200
Subject: [PATCH 16/20] Update evap/results/exporters.py
Co-authored-by: Niklas Mohrin
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index e0a0fec10c..c9e788585a 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -206,7 +206,7 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
- self.write_cell(_("Average for this question over all evaluations in all published semesters"), "average")
+ self.write_cell(_("Average result for this question over all published evaluations in all semesters"), "average")
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
From 08af73db7ac36a9f944019ca7dca6dd3960240b0 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 19:25:45 +0200
Subject: [PATCH 17/20] Update evap/results/exporters.py
Co-authored-by: Niklas Mohrin
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index c9e788585a..1210ec51a9 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -224,7 +224,7 @@ def write_headings_and_evaluation_info(
self.next_row()
self.write_cell(_("Course Type"), "bold")
- self.write_cell("", "program")
+ self.write_cell("", "border_left_right") # empty cell in grade-average column
for evaluation, __ in evaluations_with_results:
self.write_cell(evaluation.course.type.name, "border_left_right")
From 88a4f21bf8b350be65cdf2fcf049008ff9a3a91d Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 20:36:16 +0200
Subject: [PATCH 18/20] Update evap/results/exporters.py
Co-authored-by: Niklas Mohrin
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 1210ec51a9..617540952a 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -306,7 +306,7 @@ def _get_average_grade_and_approval_ratio(
value_sum += grade_result.average * grade_result.count_sum
count_sum += grade_result.count_sum
- if grade_result.question.is_yes_no_question:
+ if question.is_yes_no_question:
approval_count += grade_result.approval_count
if not value_sum:
From 7352bd9c6d4a010696990889117d5badd852b579 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 20:36:34 +0200
Subject: [PATCH 19/20] Update evap/results/exporters.py
Co-authored-by: Niklas Mohrin
---
evap/results/exporters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 617540952a..d574809728 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -309,7 +309,7 @@ def _get_average_grade_and_approval_ratio(
if question.is_yes_no_question:
approval_count += grade_result.approval_count
- if not value_sum:
+ if not count_sum:
return None, None
avg = value_sum / count_sum
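Patches 18 and 19 both touch the accumulation loop in _get_average_grade_and_approval_ratio. Read together, the intended shape is roughly the sketch below; it is a hedged reconstruction from the hunks above, and the (question, grade_result) pairs and the final approval ratio are stand-ins rather than the real exporter API.

def average_grade_and_approval_ratio(results):
    value_sum = 0.0
    count_sum = 0
    approval_count = 0
    for question, grade_result in results:       # stand-in iteration, not the real call sites
        value_sum += grade_result.average * grade_result.count_sum
        count_sum += grade_result.count_sum
        if question.is_yes_no_question:          # patch 18: check the question directly (was grade_result.question)
            approval_count += grade_result.approval_count
    if not count_sum:                            # patch 19: guard on the count, not the value sum, before dividing
        return None, None
    avg = value_sum / count_sum
    return avg, approval_count / count_sum       # assumed ratio; the real code may compute approval differently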
From 93a347c5e3301e4a995d7ceaf593f454b89371b7 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 20 Oct 2025 20:55:11 +0200
Subject: [PATCH 20/20] Adapt exporter and exporter tests to recent changes
---
evap/results/exporters.py | 4 +++-
evap/results/tests/test_exporters.py | 13 +------------
2 files changed, 4 insertions(+), 13 deletions(-)
diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index d574809728..53da65d366 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -206,7 +206,9 @@ def write_headings_and_evaluation_info(
else:
self.write_cell(export_name, "headline")
- self.write_cell(_("Average result for this question over all published evaluations in all semesters"), "average")
+ self.write_cell(
+ _("Average result for this question over all published evaluations in all semesters"), "average"
+ )
for evaluation, __ in evaluations_with_results:
title = evaluation.full_name
diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index 7dc9ba4236..c1393e706c 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -357,17 +357,6 @@ def test_no_program_or_course_type(self):
with self.assertRaises(AssertionError):
ResultsExporter().export(BytesIO(), [evaluation.course.semester], [])
- def test_exclude_single_result(self):
- program = baker.make(Program)
- evaluation = baker.make(
- Evaluation, is_single_result=True, state=Evaluation.State.PUBLISHED, course__programs=[program]
- )
- cache_results(evaluation)
- sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
- self.assertEqual(
- len(sheet.row_values(0)), 2, "There should be no column for the evaluation, only the row description"
- )
-
def test_exclude_used_but_unanswered_questionnaires(self):
program = baker.make(Program)
evaluation = baker.make(
@@ -676,7 +665,7 @@ def test_total_average(self):
self.assertEqual(
workbook.sheets()[0].row_values(0)[1],
- "Average for this question over all evaluations in all published semesters",
+ "Average result for this question over all published evaluations in all semesters",
)
# average for second questionnaire must be the value from evaluation2 (since evaluation1 doesn't have the questionnaire)